From 930102bb5d272318c64d28860b3ff0d7435aa79b Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Wed, 8 Dec 2010 10:01:04 +0000 Subject: Add 'return' stats --- src/rabbit_channel.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c8ad00a..ada63ca2 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1219,10 +1219,16 @@ is_message_persistent(Content) -> IsPersistent end. -process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> +process_routing_result(unroutable, _, MsgSeqNo, + Message = #basic_message{exchange_name = ExchangeName}, + State) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), ok = basic_return(Message, State#ch.writer_pid, no_route), send_or_enqueue_ack(MsgSeqNo, undefined, State); -process_routing_result(not_delivered, _, MsgSeqNo, Message, State) -> +process_routing_result(not_delivered, _, MsgSeqNo, + Message = #basic_message{exchange_name = ExchangeName}, + State) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), ok = basic_return(Message, State#ch.writer_pid, no_consumers), send_or_enqueue_ack(MsgSeqNo, undefined, State); process_routing_result(routed, [], MsgSeqNo, _, State) -> -- cgit v1.2.1 From 98c743c563055ded2d8dc34182b05795c295c17c Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Wed, 8 Dec 2010 13:09:26 +0000 Subject: moved maybe_incr_stats to basic_return --- src/rabbit_channel.erl | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ada63ca2..50677fc6 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1070,11 +1070,12 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, basic_return(#basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}, - WriterPid, Reason) -> + State, Reason) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), {_Close, ReplyCode, ReplyText} = rabbit_framing_amqp_0_9_1:lookup_amqp_exception(Reason), ok = rabbit_writer:send_command( - WriterPid, + State#ch.writer_pid, #'basic.return'{reply_code = ReplyCode, reply_text = ReplyText, exchange = ExchangeName#resource.name, @@ -1219,17 +1220,11 @@ is_message_persistent(Content) -> IsPersistent end. -process_routing_result(unroutable, _, MsgSeqNo, - Message = #basic_message{exchange_name = ExchangeName}, - State) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), - ok = basic_return(Message, State#ch.writer_pid, no_route), +process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> + ok = basic_return(Message, State, no_route), send_or_enqueue_ack(MsgSeqNo, undefined, State); -process_routing_result(not_delivered, _, MsgSeqNo, - Message = #basic_message{exchange_name = ExchangeName}, - State) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), - ok = basic_return(Message, State#ch.writer_pid, no_consumers), +process_routing_result(not_delivered, _, MsgSeqNo, Message, State) -> + ok = basic_return(Message, State, no_consumers), send_or_enqueue_ack(MsgSeqNo, undefined, State); process_routing_result(routed, [], MsgSeqNo, _, State) -> send_or_enqueue_ack(MsgSeqNo, undefined, State); -- cgit v1.2.1 From 63f41f09a6d700d4e1714a96a074fb9f71091deb Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Wed, 8 Dec 2010 17:13:14 -0800 Subject: Clients can now override frame_max in RabbitMQ server. 
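For context, the change below reads the limit from the 'rabbit' application environment and falls back to 131072 bytes (the old hard-coded ?FRAME_MAX) when nothing is configured. Purely as an illustration -- the value and comments here are examples, not part of this patch -- an operator could lower the advertised maximum with a rabbitmq.config entry along these lines:

    %% rabbitmq.config -- illustrative only, not part of this patch.
    %% Advertise a 64 kB frame_max in connection.tune instead of the
    %% built-in 131072-byte default (values are examples).
    [
     {rabbit, [{frame_max, 65536}]}
    ].
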
--- src/rabbit_reader.erl | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 4dd150a2..b6df9c98 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -50,7 +50,6 @@ -define(CLOSING_TIMEOUT, 1). -define(CHANNEL_TERMINATION_TIMEOUT, 3). -define(SILENT_CLOSE_DELAY, 3). --define(FRAME_MAX, 131072). %% set to zero once QPid fix their negotiation %--------------------------------------------------------------------------- @@ -739,7 +738,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, sock = Sock}) -> User = rabbit_access_control:check_login(Mechanism, Response), Tune = #'connection.tune'{channel_max = 0, - frame_max = ?FRAME_MAX, + frame_max = my_frame_max(), heartbeat = 0}, ok = send_on_channel0(Sock, Tune, Protocol), State#v1{connection_state = tuning, @@ -752,14 +751,15 @@ handle_method0(#'connection.tune_ok'{frame_max = FrameMax, connection = Connection, sock = Sock, start_heartbeat_fun = SHF}) -> - if (FrameMax /= 0) and (FrameMax < ?FRAME_MIN_SIZE) -> + MyFrameMax = my_frame_max(), + if FrameMax /= 0 andalso FrameMax < ?FRAME_MIN_SIZE -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w < ~w min size", [FrameMax, ?FRAME_MIN_SIZE]); - (?FRAME_MAX /= 0) and (FrameMax > ?FRAME_MAX) -> + MyFrameMax /= 0 andalso FrameMax > MyFrameMax -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w > ~w max size", - [FrameMax, ?FRAME_MAX]); + [FrameMax, MyFrameMax]); true -> SendFun = fun() -> @@ -824,6 +824,14 @@ handle_method0(_Method, #v1{connection_state = S}) -> rabbit_misc:protocol_error( channel_error, "unexpected method in connection state ~w", [S]). +%% Compute frame_max for this instance. Could use 0, but breaks QPid Java +%% client. Default is 131072, but user can override in rabbitmq.config. +my_frame_max() -> + case application:get_env(rabbit, frame_max) of + {ok, FM} -> FM; + _ -> 131072 + end. + send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -- cgit v1.2.1 From d6d7b3bee95eee4137b041a3346b22988272b99e Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Tue, 14 Dec 2010 16:16:01 -0800 Subject: Put in Matthias's changes. 
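With the default now shipped in ebin/rabbit_app.in, the lookup no longer needs an in-code fallback; a missing key becomes a packaging error that should fail loudly. Roughly, the two styles compare as in this sketch (the first function name is invented purely for contrast):

    %% Before: tolerate an unset key and fall back in code.
    frame_max_with_fallback() ->
        case application:get_env(rabbit, frame_max) of
            {ok, FrameMax} -> FrameMax;
            undefined      -> 131072
        end.

    %% After: the default lives in the application environment
    %% (ebin/rabbit_app.in), so simply assert that it is present.
    server_frame_max() ->
        {ok, FrameMax} = application:get_env(rabbit, frame_max),
        FrameMax.
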
--- ebin/rabbit_app.in | 1 + src/rabbit_reader.erl | 20 +++++++++----------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 6c33ef8b..d3808a54 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -19,6 +19,7 @@ {vm_memory_high_watermark, 0.4}, {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, + {frame_max, 131072}, {persister_max_wrap_entries, 500}, {persister_hibernate_after, 10000}, {msg_store_file_size_limit, 16777216}, diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index b6df9c98..cdb3586a 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -738,7 +738,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, sock = Sock}) -> User = rabbit_access_control:check_login(Mechanism, Response), Tune = #'connection.tune'{channel_max = 0, - frame_max = my_frame_max(), + frame_max = server_frame_max(), heartbeat = 0}, ok = send_on_channel0(Sock, Tune, Protocol), State#v1{connection_state = tuning, @@ -751,15 +751,15 @@ handle_method0(#'connection.tune_ok'{frame_max = FrameMax, connection = Connection, sock = Sock, start_heartbeat_fun = SHF}) -> - MyFrameMax = my_frame_max(), + ServerFrameMax = server_frame_max(), if FrameMax /= 0 andalso FrameMax < ?FRAME_MIN_SIZE -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w < ~w min size", [FrameMax, ?FRAME_MIN_SIZE]); - MyFrameMax /= 0 andalso FrameMax > MyFrameMax -> + ServerFrameMax /= 0 andalso FrameMax > ServerFrameMax -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w > ~w max size", - [FrameMax, MyFrameMax]); + [FrameMax, ServerFrameMax]); true -> SendFun = fun() -> @@ -824,13 +824,11 @@ handle_method0(_Method, #v1{connection_state = S}) -> rabbit_misc:protocol_error( channel_error, "unexpected method in connection state ~w", [S]). -%% Compute frame_max for this instance. Could use 0, but breaks QPid Java -%% client. Default is 131072, but user can override in rabbitmq.config. -my_frame_max() -> - case application:get_env(rabbit, frame_max) of - {ok, FM} -> FM; - _ -> 131072 - end. +%% Compute frame_max for this instance. Could simply use 0, but breaks +%% QPid Java client. +server_frame_max() -> + {ok, FrameMax} = application:get_env(rabbit, frame_max), + FrameMax. send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -- cgit v1.2.1 From fe3a8699396d5ea3d9e4d0f67ab411adbf9a24d5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 13:39:08 +0000 Subject: Sketch of how clustered upgrades might work. --- src/rabbit_mnesia.erl | 81 ++++++++++++++++++++++++++++------------ src/rabbit_queue_index.erl | 2 +- src/rabbit_upgrade.erl | 52 ++++++++++++++++---------- src/rabbit_upgrade_functions.erl | 33 ++++++++++++++-- 4 files changed, 120 insertions(+), 48 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 11f5e410..2550bdd4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1]). + forget_other_nodes/0, empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). @@ -66,6 +66,7 @@ -spec(is_clustered/0 :: () -> boolean()). -spec(running_clustered_nodes/0 :: () -> [node()]). -spec(all_clustered_nodes/0 :: () -> [node()]). 
+-spec(forget_other_nodes/0 :: () -> 'ok'). -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). @@ -126,8 +127,8 @@ cluster(ClusterNodes, Force) -> %% return node to its virgin state, where it is not member of any %% cluster, has no cluster configuration, no local database, and no %% persisted messages -reset() -> reset(false). -force_reset() -> reset(true). +reset() -> reset(all). +force_reset() -> reset(force_all). is_clustered() -> RunningNodes = running_clustered_nodes(), @@ -139,6 +140,10 @@ all_clustered_nodes() -> running_clustered_nodes() -> mnesia:system_info(running_db_nodes). +forget_other_nodes() -> + Nodes = all_clustered_nodes() -- [node()], + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes]. + empty_ram_only_tables() -> Node = node(), lists:foreach( @@ -385,32 +390,54 @@ init_db(ClusterNodes, Force) -> {[], true, [_]} -> %% True single disc node, attempt upgrade ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade() of + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> %% "Master" (i.e. without config) disc node in cluster, - %% verify schema + %% do upgrade ok = wait_for_tables(), - ensure_version_ok(rabbit_upgrade:read_version()), - ensure_schema_ok(); + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; {[], false, _} -> %% Nothing there at all, start from scratch ok = create_schema(); {[AnotherNode|_], _, _} -> %% Subsequent node in cluster, catch up - ensure_version_ok(rabbit_upgrade:read_version()), - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), + case IsDiskNode of + true -> + %% TODO test this branch ;) + %% TODO don't just reset every time we start up! + mnesia:stop(), + reset(mnesia), + mnesia:start(), + %% TODO what should we ensure? + %% ensure_version_ok(rabbit_upgrade:read_version()), + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + %% TODO needed? + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(disc); + false -> + ok = wait_for_replicated_tables(), + %% TODO can we live without this on disc? + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(ram), + case rabbit_upgrade:maybe_upgrade([local]) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end + end, ensure_schema_ok() end; {error, Reason} -> @@ -563,12 +590,15 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. 
-reset(Force) -> +%% Mode: force_all - get rid of everything unconditionally +%% all - get rid of everything, conditional on Mnesia working +%% mnesia - just get rid of Mnesia, leave everything else +reset(Mode) -> ok = ensure_mnesia_not_running(), Node = node(), - case Force of - true -> ok; - false -> + case Mode of + force_all -> ok; + _ -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = @@ -583,9 +613,14 @@ reset(Force) -> rabbit_misc:ensure_ok(mnesia:delete_schema([Node]), cannot_delete_schema) end, - ok = delete_cluster_nodes_config(), - %% remove persisted messages and any other garbage we find - ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")), + case Mode of + mnesia -> + ok; + _ -> + ok = delete_cluster_nodes_config(), + %% remove persisted messages and any other garbage we find + ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")) + end, ok. leave_cluster([], _) -> ok; diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76c0a4ef..6adcd8b0 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -182,7 +182,7 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({add_queue_ttl, []}). +-rabbit_upgrade({add_queue_ttl, local, []}). -ifdef(use_specs). diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 97a07514..dee08f48 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/0, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/1, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -33,9 +33,10 @@ -ifdef(use_specs). -type(step() :: atom()). +-type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/0 :: () -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -47,24 +48,28 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade() -> +maybe_upgrade(Scopes) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> case upgrades_to_apply(CurrentHeads, G) of - [] -> ok; - Upgrades -> apply_upgrades(Upgrades) - end; - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, G) end); {error, enoent} -> version_not_available end. +maybe_upgrade_graph(CurrentHeads, Scopes, G) -> + case unknown_heads(CurrentHeads, G) of + [] -> + case upgrades_to_apply(CurrentHeads, Scopes, G) of + [] -> + ok; + Upgrades -> + apply_upgrades(Upgrades, lists:member(mnesia, Scopes)) + end; + Unknown -> + throw({error, {future_upgrades_found, Unknown}}) + end. + read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [Heads]} -> {ok, Heads}; @@ -98,16 +103,17 @@ with_upgrade_graph(Fun) -> end. vertices(Module, Steps) -> - [{StepName, {Module, StepName}} || {StepName, _Reqs} <- Steps]. + [{StepName, {Scope, {Module, StepName}}} || + {StepName, Scope, _Reqs} <- Steps]. 
edges(_Module, Steps) -> - [{Require, StepName} || {StepName, Requires} <- Steps, Require <- Requires]. - + [{Require, StepName} || {StepName, _Scope, Requires} <- Steps, + Require <- Requires]. unknown_heads(Heads, G) -> [H || H <- Heads, digraph:vertex(G, H) =:= false]. -upgrades_to_apply(Heads, G) -> +upgrades_to_apply(Heads, Scopes, G) -> %% Take all the vertices which can reach the known heads. That's %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. @@ -117,15 +123,17 @@ upgrades_to_apply(Heads, G) -> sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. - [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. + Sorted = [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))], + %% Only return the upgrades for the appropriate scopes + [Upgrade || {Scope, Upgrade} <- Sorted, lists:member(Scope, Scopes)]. heads(G) -> lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). %% ------------------------------------------------------------------- -apply_upgrades(Upgrades) -> +apply_upgrades(Upgrades, ForgetOthers) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -140,6 +148,10 @@ apply_upgrades(Upgrades) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + case ForgetOthers of + true -> rabbit_mnesia:forget_other_nodes(); + _ -> ok + end, [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 7848c848..43e468ff 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -24,10 +24,14 @@ -compile([export_all]). --rabbit_upgrade({remove_user_scope, []}). --rabbit_upgrade({hash_passwords, []}). --rabbit_upgrade({add_ip_to_listener, []}). --rabbit_upgrade({internal_exchanges, []}). +-rabbit_upgrade({remove_user_scope, mnesia, []}). +-rabbit_upgrade({hash_passwords, mnesia, []}). +-rabbit_upgrade({add_ip_to_listener, mnesia, []}). +-rabbit_upgrade({internal_exchanges, mnesia, []}). + +-rabbit_upgrade({one, mnesia, []}). +-rabbit_upgrade({two, local, [one]}). +-rabbit_upgrade({three, mnesia, [two]}). %% ------------------------------------------------------------------- @@ -85,6 +89,27 @@ internal_exchanges() -> || T <- Tables ], ok. +one() -> + mnesia( + rabbit_user, + fun ({user, Username, Hash, IsAdmin}) -> + {user, Username, Hash, IsAdmin, foo} + end, + [username, password_hash, is_admin, extra]). + +two() -> + ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), + [test]). + +three() -> + mnesia( + rabbit_user, + fun ({user, Username, Hash, IsAdmin, _}) -> + {user, Username, Hash, IsAdmin} + end, + [username, password_hash, is_admin]). + + %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 0b093ecb559424e2b2c7809cba5dc2cbdfab710c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 14:18:51 +0000 Subject: These two cases are the same. 
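After the previous commit both branches attempt the same upgrade, so the third element of the matched tuple no longer selects different behaviour and can be dropped, as in this toy sketch (function and atom names are placeholders, not RabbitMQ code):

    %% Two clauses with identical bodies...
    choose({[], true, [_]}) -> upgrade;   % lone disc node
    choose({[], true, _})   -> upgrade;   % "master" disc node in a cluster
    choose(_)               -> other.

    %% ...collapse to one clause over a smaller tuple.
    choose2({[], true}) -> upgrade;
    choose2(_)          -> other.
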
--- src/rabbit_mnesia.erl | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2550bdd4..f1e007a1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -386,26 +386,19 @@ init_db(ClusterNodes, Force) -> end; true -> ok end, - case {Nodes, mnesia:system_info(use_dir), all_clustered_nodes()} of - {[], true, [_]} -> - %% True single disc node, attempt upgrade + case {Nodes, mnesia:system_info(use_dir)} of + {[], true} -> + %% True single disc node, or master" (i.e. without + %% config) disc node in cluster, attempt upgrade ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - {[], true, _} -> - %% "Master" (i.e. without config) disc node in cluster, - %% do upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - {[], false, _} -> + {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[AnotherNode|_], _, _} -> + {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), -- cgit v1.2.1 From 50a9fc4fb471d68225090f0b0fe39ead5110012b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:09:18 +0000 Subject: Make disc node reclustering work, various cleanups. --- src/rabbit_mnesia.erl | 94 +++++++++++++++++++++++--------------------------- src/rabbit_upgrade.erl | 24 ++++++------- 2 files changed, 53 insertions(+), 65 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f1e007a1..e5929f86 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - forget_other_nodes/0, empty_ram_only_tables/0, copy_db/1]). + empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). @@ -66,7 +66,6 @@ -spec(is_clustered/0 :: () -> boolean()). -spec(running_clustered_nodes/0 :: () -> [node()]). -spec(all_clustered_nodes/0 :: () -> [node()]). --spec(forget_other_nodes/0 :: () -> 'ok'). -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). @@ -127,8 +126,8 @@ cluster(ClusterNodes, Force) -> %% return node to its virgin state, where it is not member of any %% cluster, has no cluster configuration, no local database, and no %% persisted messages -reset() -> reset(all). -force_reset() -> reset(force_all). +reset() -> reset(false). +force_reset() -> reset(true). is_clustered() -> RunningNodes = running_clustered_nodes(), @@ -388,10 +387,11 @@ init_db(ClusterNodes, Force) -> end, case {Nodes, mnesia:system_info(use_dir)} of {[], true} -> - %% True single disc node, or master" (i.e. without + %% True single disc node, or "master" (i.e. 
without %% config) disc node in cluster, attempt upgrade ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], fun forget_other_nodes/0) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -400,37 +400,27 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case IsDiskNode of - true -> - %% TODO test this branch ;) - %% TODO don't just reset every time we start up! - mnesia:stop(), - reset(mnesia), - mnesia:start(), - %% TODO what should we ensure? - %% ensure_version_ok(rabbit_upgrade:read_version()), - %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - %% TODO needed? - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(disc); - false -> - ok = wait_for_replicated_tables(), - %% TODO can we live without this on disc? - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(ram), - case rabbit_upgrade:maybe_upgrade([local]) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end + %% TODO what should we ensure? + %% ensure_version_ok(rabbit_upgrade:read_version()), + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + Type = case ClusterNodes == [] orelse + lists:member(node(), ClusterNodes) of + true -> disc; + false -> ram + end, + case rabbit_upgrade:maybe_upgrade( + [local], reset_fun(ProperClusterNodes)) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(Type), ensure_schema_ok() end; {error, Reason} -> @@ -470,6 +460,16 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. +reset_fun(ProperClusterNodes) -> + fun() -> + mnesia:stop(), + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), + {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes) + end. + create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), @@ -583,15 +583,12 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. 
-%% Mode: force_all - get rid of everything unconditionally -%% all - get rid of everything, conditional on Mnesia working -%% mnesia - just get rid of Mnesia, leave everything else -reset(Mode) -> +reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), - case Mode of - force_all -> ok; - _ -> + case Force of + true -> ok; + false -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = @@ -606,14 +603,9 @@ reset(Mode) -> rabbit_misc:ensure_ok(mnesia:delete_schema([Node]), cannot_delete_schema) end, - case Mode of - mnesia -> - ok; - _ -> - ok = delete_cluster_nodes_config(), - %% remove persisted messages and any other garbage we find - ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")) - end, + ok = delete_cluster_nodes_config(), + %% remove persisted messages and any other garbage we find + ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")), ok. leave_cluster([], _) -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dee08f48..7e59faaf 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/1, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/2, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,7 +36,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +%% TODO update +%%-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -48,23 +49,21 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes) -> +maybe_upgrade(Scopes, Fun) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, G) end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) end); {error, enoent} -> version_not_available end. -maybe_upgrade_graph(CurrentHeads, Scopes, G) -> +maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) -> case unknown_heads(CurrentHeads, G) of [] -> case upgrades_to_apply(CurrentHeads, Scopes, G) of - [] -> - ok; - Upgrades -> - apply_upgrades(Upgrades, lists:member(mnesia, Scopes)) + [] -> ok; + Upgrades -> apply_upgrades(Upgrades, Fun) end; Unknown -> throw({error, {future_upgrades_found, Unknown}}) @@ -133,7 +132,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, ForgetOthers) -> +apply_upgrades(Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -148,10 +147,7 @@ apply_upgrades(Upgrades, ForgetOthers) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - case ForgetOthers of - true -> rabbit_mnesia:forget_other_nodes(); - _ -> ok - end, + Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 34ca7a82d250748ea59d92aa499cb562c8332ae4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:12:07 +0000 Subject: Revert arbitrary difference from default. 
--- src/rabbit_mnesia.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e5929f86..d8086b56 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -404,11 +404,8 @@ init_db(ClusterNodes, Force) -> %% ensure_version_ok(rabbit_upgrade:read_version()), %% ensure_version_ok( %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - Type = case ClusterNodes == [] orelse - lists:member(node(), ClusterNodes) of - true -> disc; - false -> ram - end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( [local], reset_fun(ProperClusterNodes)) of ok -> @@ -420,7 +417,10 @@ init_db(ClusterNodes, Force) -> end, ok = wait_for_replicated_tables(), ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(Type), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), ensure_schema_ok() end; {error, Reason} -> -- cgit v1.2.1 From 8aaef521a855a3df1223e3b1abeafe204b1e58b6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:24:59 +0000 Subject: Fix spec --- src/rabbit_mnesia.erl | 6 ++++-- src/rabbit_upgrade.erl | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index d8086b56..11e9a178 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -141,7 +141,8 @@ running_clustered_nodes() -> forget_other_nodes() -> Nodes = all_clustered_nodes() -- [node()], - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes]. + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes], + ok. empty_ram_only_tables() -> Node = node(), @@ -467,7 +468,8 @@ reset_fun(ProperClusterNodes) -> cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes) + {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes), + ok end. create_schema() -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 7e59faaf..48c00d69 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -36,8 +36,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). -%% TODO update -%%-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/2 :: ([scope()], fun (() -> 'ok')) + -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -147,7 +147,7 @@ apply_upgrades(Upgrades, Fun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - Fun(), + ok = Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 00dd61ca4b2372d698225ea3e58a932bdd1baffc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:34:23 +0000 Subject: Check our version matches the remote version. 
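The reinstated call compares the schema version recorded by another cluster node against the version this node expects. ensure_version_ok/1 itself is defined elsewhere in rabbit_mnesia.erl and is not shown in this series; the sketch below only illustrates the shape it relies on (the error term is illustrative):

    %% Accepts the {ok, Version} | {error, _} results returned by
    %% rabbit_upgrade:read_version/0, whether called locally or via
    %% rpc:call/4 on another node.
    ensure_version_ok({ok, DiscVersion}) ->
        case rabbit_upgrade:desired_version() of
            DiscVersion    -> ok;
            DesiredVersion -> throw({error, {version_mismatch,
                                             DesiredVersion, DiscVersion}})
        end;
    ensure_version_ok({error, _}) ->
        %% No version on disc yet, e.g. a freshly created node.
        ok = rabbit_upgrade:write_version().
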
--- src/rabbit_mnesia.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 11e9a178..82e2a30e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -401,10 +401,8 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up - %% TODO what should we ensure? - %% ensure_version_ok(rabbit_upgrade:read_version()), - %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( -- cgit v1.2.1 From cec5a2c8548dcc6c7a7ad44c7b72361adca1fccb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 6 Jan 2011 17:50:21 +0000 Subject: Decide the node to do mnesia upgrades based on which was the last disc node to shut down. Blow up with a hopefully helpful error message if the "wrong" disc node is started first. This works; you can now upgrade a disc-only cluster. --- src/rabbit_mnesia.erl | 115 ++++++++++++++++++++++++++++++++++--------------- src/rabbit_upgrade.erl | 18 ++++---- 2 files changed, 91 insertions(+), 42 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 82e2a30e..49d04116 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -43,6 +43,8 @@ %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). +-define(EXAMPLE_RABBIT_TABLE, rabbit_durable_exchange). + -include("rabbit.hrl"). %%---------------------------------------------------------------------------- @@ -164,7 +166,7 @@ nodes_of_type(Type) -> %% Specifically, we check whether a certain table, which we know %% will be written to disk on a disc node, is stored on disk or in %% RAM. - mnesia:table_info(rabbit_durable_exchange, Type). + mnesia:table_info(?EXAMPLE_RABBIT_TABLE, Type). table_definitions() -> [{rabbit_user, @@ -387,40 +389,50 @@ init_db(ClusterNodes, Force) -> true -> ok end, case {Nodes, mnesia:system_info(use_dir)} of - {[], true} -> - %% True single disc node, or "master" (i.e. 
without - %% config) disc node in cluster, attempt upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], fun forget_other_nodes/0) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[AnotherNode|_], _} -> - %% Subsequent node in cluster, catch up - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], reset_fun(ProperClusterNodes)) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - ensure_schema_ok() + {_, _} -> + DiscNodes = mnesia:table_info(schema, disc_copies), + case are_we_upgrader(DiscNodes) of + true -> + %% True single disc node, or last disc + %% node in cluster to shut down, attempt + %% upgrade + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], + fun () -> ok end, + fun forget_other_nodes/0) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + false -> + %% Subsequent node in cluster, catch up + %% TODO how to do this? + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + case rabbit_upgrade:maybe_upgrade( + [local], + ensure_nodes_running_fun(DiscNodes), + reset_fun(DiscNodes -- [node()])) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + ensure_schema_ok() + end end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -459,17 +471,52 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -reset_fun(ProperClusterNodes) -> +ensure_nodes_running_fun(Nodes) -> + fun() -> + case nodes_running(Nodes) of + [] -> + exit("Cluster upgrade needed. The first node you start " + "should be the last node to be shut down."); + _ -> + ok + end + end. + +reset_fun(Nodes) -> fun() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes), + {ok, _} = mnesia:change_config(extra_db_nodes, Nodes), ok end. +%% Were we the last node in the cluster to shut down or is there no cluster? +%% The answer to this is yes if: +%% * We are our canonical source for reading a table +%% - If the canonical source is "nowhere" or another node, we are out of date +%% * No other nodes are running Mnesia and have finished booting Rabbit. +%% - Since any node will be its own canonical source once the cluster is up. + +are_we_upgrader(Nodes) -> + Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), + Node = node(), + case {Where, nodes_running(Nodes)} of + {Node, []} -> true; + {_, _} -> false + end. 
+ +nodes_running(Nodes) -> + [N || N <- Nodes, node_running(N)]. + +node_running(Node) -> + case rpc:call(Node, application, which_applications, []) of + {badrpc, _} -> false; + Apps -> lists:keysearch(rabbit, 1, Apps) =/= false + end. + create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 48c00d69..c852a0f9 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/2, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/3, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,7 +36,7 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/2 :: ([scope()], fun (() -> 'ok')) +-spec(maybe_upgrade/3 :: ([scope()], fun (() -> 'ok'), fun (() -> 'ok')) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). @@ -49,21 +49,22 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes, Fun) -> +maybe_upgrade(Scopes, GuardFun, UpgradeFun) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, + GuardFun, UpgradeFun, G) end); {error, enoent} -> version_not_available end. -maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) -> +maybe_upgrade_graph(CurrentHeads, Scopes, GuardFun, UpgradeFun, G) -> case unknown_heads(CurrentHeads, G) of [] -> case upgrades_to_apply(CurrentHeads, Scopes, G) of [] -> ok; - Upgrades -> apply_upgrades(Upgrades, Fun) + Upgrades -> apply_upgrades(Upgrades, GuardFun, UpgradeFun) end; Unknown -> throw({error, {future_upgrades_found, Unknown}}) @@ -132,7 +133,8 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, Fun) -> +apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> + GuardFun(), LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -147,7 +149,7 @@ apply_upgrades(Upgrades, Fun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - ok = Fun(), + ok = UpgradeFun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From d39b09caeb77f61ead9d1621bf808b6d5272d9bb Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 6 Jan 2011 19:04:33 +0000 Subject: Sender-specified distribution First attempt for direct exchanges only --- src/rabbit_exchange_type_direct.erl | 16 +++++++++++++--- src/rabbit_misc.erl | 11 ++++++++++- src/rabbit_router.erl | 21 +++++++++++++++++++-- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index d49d0199..ab688853 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -31,6 +31,7 @@ -module(rabbit_exchange_type_direct). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -50,9 +51,18 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. 
-route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey}}) -> - rabbit_router:match_routing_key(Name, RoutingKey). +route(#exchange{name = #resource{virtual_host = VHost} = Name}, + #delivery{message = #basic_message{routing_key = RoutingKey, + content = Content}}) -> + BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), + HeaderRKeys = + case (Content#content.properties)#'P_basic'.headers of + undefined -> []; + Headers -> rabbit_misc:table_lookup(Headers, <<"CC">>, <<0>>) ++ + rabbit_misc:table_lookup(Headers, <<"BCC">>, <<0>>) + end, + HeaderRoutes = [rabbit_misc:r(VHost, queue, RKey) || RKey <- HeaderRKeys], + lists:usort(BindingRoutes ++ HeaderRoutes). validate(_X) -> ok. create(_X) -> ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 15ba787a..604346ed 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,7 +40,7 @@ protocol_error/3, protocol_error/4, protocol_error/1]). -export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). --export([table_lookup/2]). +-export([table_lookup/3, table_lookup/2]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). @@ -112,6 +112,8 @@ 'ok' | rabbit_types:connection_exit()). -spec(dirty_read/1 :: ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). +-spec(table_lookup/3 :: + (rabbit_framing:amqp_table(), binary(), binary()) -> [binary()]). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). @@ -253,6 +255,13 @@ dirty_read(ReadSpec) -> [] -> {error, not_found} end. +table_lookup(Table, Key, Separator) -> + case table_lookup(Table, Key) of + undefined -> []; + {longstr, BinVal} -> binary:split(BinVal, Separator, [global]); + _ -> [] + end. + table_lookup(Table, Key) -> case lists:keysearch(Key, 1, Table) of {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin}; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index d49c072c..2f556df7 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -32,6 +32,7 @@ -module(rabbit_router). -include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -export([deliver/2, match_bindings/2, match_routing_key/2]). @@ -68,22 +69,38 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. QPids = lookup_qpids(QNames), + ModifiedDelivery = strip_header(Delivery, <<"BCC">>), delegate:invoke_no_result( - QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, Delivery) end), + QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), + ModifiedDelivery = strip_header(Delivery, <<"BCC">>), {Success, _} = delegate:invoke(QPids, fun (Pid) -> - rabbit_amqqueue:deliver(Pid, Delivery) + rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {Routed, Handled} = lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). 
+strip_header(Delivery = #delivery{message = Message = #basic_message{ + content = Content = #content{ + properties = Props = #'P_basic'{headers = Headers}}}}, + Key) when Headers =/= undefined -> + case lists:keyfind(Key, 1, Headers) of + false -> Delivery; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + Delivery#delivery{message = Message#basic_message{ + content = Content#content{ + properties_bin = none, + properties = Props#'P_basic'{headers = Headers0}}}} + end; +strip_header(Delivery, _Key) -> + Delivery. %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same source -- cgit v1.2.1 From 9ab02c62b7edda1a097912b1f0194788df15f2ff Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 11:54:35 +0000 Subject: Ironically our dummy upgrades now need to be upgraded. --- src/rabbit_upgrade_functions.erl | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 8fee70af..1806c40f 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -91,11 +91,21 @@ internal_exchanges() -> || T <- Tables ], ok. +user_to_internal_user() -> + mnesia( + rabbit_user, + fun({user, Username, PasswordHash, IsAdmin}) -> + {internal_user, Username, PasswordHash, IsAdmin} + end, + [username, password_hash, is_admin], internal_user). + + + one() -> mnesia( rabbit_user, - fun ({user, Username, Hash, IsAdmin}) -> - {user, Username, Hash, IsAdmin, foo} + fun ({internal_user, Username, Hash, IsAdmin}) -> + {internal_user, Username, Hash, IsAdmin, foo} end, [username, password_hash, is_admin, extra]). @@ -106,20 +116,11 @@ two() -> three() -> mnesia( rabbit_user, - fun ({user, Username, Hash, IsAdmin, _}) -> - {user, Username, Hash, IsAdmin} + fun ({internal_user, Username, Hash, IsAdmin, _}) -> + {internal_user, Username, Hash, IsAdmin} end, [username, password_hash, is_admin]). - -user_to_internal_user() -> - mnesia( - rabbit_user, - fun({user, Username, PasswordHash, IsAdmin}) -> - {internal_user, Username, PasswordHash, IsAdmin} - end, - [username, password_hash, is_admin], internal_user). - %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From d235fbe0db6c709860e8fa19d7917484ca902d2e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 12:07:12 +0000 Subject: Refactor a bit, reinstate ensure_version_ok check. --- src/rabbit_mnesia.erl | 86 +++++++++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c536c64f..9ea1be28 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -393,46 +393,7 @@ init_db(ClusterNodes, Force) -> %% Nothing there at all, start from scratch ok = create_schema(); {_, _} -> - DiscNodes = mnesia:table_info(schema, disc_copies), - case are_we_upgrader(DiscNodes) of - true -> - %% True single disc node, or last disc - %% node in cluster to shut down, attempt - %% upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], - fun () -> ok end, - fun forget_other_nodes/0) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - false -> - %% Subsequent node in cluster, catch up - %% TODO how to do this? 
- %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], - ensure_nodes_running_fun(DiscNodes), - reset_fun(DiscNodes -- [node()])) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - ensure_schema_ok() - end + ok = setup_existing_node(ClusterNodes, Nodes) end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -441,6 +402,49 @@ init_db(ClusterNodes, Force) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. +setup_existing_node(ClusterNodes, Nodes) -> + DiscNodes = mnesia:table_info(schema, disc_copies), + case are_we_upgrader(DiscNodes) of + true -> + %% True single disc node, or last disc node in cluster to + %% shut down, attempt upgrade + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], fun () -> ok end, + fun forget_other_nodes/0) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + false -> + %% Subsequent node in cluster, catch up + case Nodes of + [AnotherNode|_] -> + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])); + [] -> + ok + end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + case rabbit_upgrade:maybe_upgrade( + [local], ensure_nodes_running_fun(DiscNodes), + reset_fun(DiscNodes -- [node()])) of + ok -> + ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + ensure_schema_ok() + end. + schema_ok_or_move() -> case check_schema_integrity() of ok -> @@ -476,7 +480,7 @@ ensure_nodes_running_fun(Nodes) -> case nodes_running(Nodes) of [] -> exit("Cluster upgrade needed. 
The first node you start " - "should be the last node to be shut down."); + "should be the last disc node to be shut down."); _ -> ok end -- cgit v1.2.1 From d1e659c8536e4bdd855d881eb2b1b6ea7def180a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 13:23:21 +0000 Subject: Cosmetic --- src/rabbit_mnesia.erl | 27 ++++++++++++++++----------- src/rabbit_upgrade.erl | 2 +- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9ea1be28..ca84b29e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -407,11 +407,10 @@ setup_existing_node(ClusterNodes, Nodes) -> case are_we_upgrader(DiscNodes) of true -> %% True single disc node, or last disc node in cluster to - %% shut down, attempt upgrade + %% shut down, attempt upgrade if necessary ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], fun () -> ok end, - fun forget_other_nodes/0) of + case rabbit_upgrade:maybe_upgrade([mnesia, local], fun () -> ok end, + fun forget_other_nodes/0) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -427,7 +426,8 @@ setup_existing_node(ClusterNodes, Nodes) -> IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( - [local], ensure_nodes_running_fun(DiscNodes), + [local], + ensure_nodes_running_fun(DiscNodes), reset_fun(DiscNodes -- [node()])) of ok -> ok; @@ -475,9 +475,9 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -ensure_nodes_running_fun(Nodes) -> +ensure_nodes_running_fun(DiscNodes) -> fun() -> - case nodes_running(Nodes) of + case nodes_running(DiscNodes) of [] -> exit("Cluster upgrade needed. The first node you start " "should be the last disc node to be shut down."); @@ -486,23 +486,28 @@ ensure_nodes_running_fun(Nodes) -> end end. -reset_fun(Nodes) -> +reset_fun(OtherNodes) -> fun() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, Nodes), + {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), ok end. %% Were we the last node in the cluster to shut down or is there no cluster? %% The answer to this is yes if: %% * We are our canonical source for reading a table -%% - If the canonical source is "nowhere" or another node, we are out of date +%% - If the canonical source is "nowhere" or another node, we are out +%% of date +%% and %% * No other nodes are running Mnesia and have finished booting Rabbit. -%% - Since any node will be its own canonical source once the cluster is up. 
+%% - Since any node will be its own canonical source once the cluster +%% is up, but just having Mnesia running is not enough - that node +%% could be halfway through starting (and deciding it is the upgrader +%% too) are_we_upgrader(Nodes) -> Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c852a0f9..3a78dd7f 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -134,7 +134,7 @@ heads(G) -> %% ------------------------------------------------------------------- apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> - GuardFun(), + ok = GuardFun(), LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> -- cgit v1.2.1 From af1a5fa2320b99d421f84c09e1fa8e2594ba3950 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 10 Jan 2011 17:25:13 +0000 Subject: Move the upgrade tests earlier in the boot process. This doesn't work either, just committing it in order not to lose it. --- src/rabbit_mnesia.erl | 107 +++++++++++++++++++++++++------------------------ src/rabbit_upgrade.erl | 53 +++++++++++++----------- 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index ca84b29e..a11347ff 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -94,6 +94,7 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> + ok = maybe_reset_for_upgrades(), ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), @@ -141,11 +142,6 @@ all_clustered_nodes() -> running_clustered_nodes() -> mnesia:system_info(running_db_nodes). -forget_other_nodes() -> - Nodes = all_clustered_nodes() -- [node()], - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes], - ok. 
- empty_ram_only_tables() -> Node = node(), lists:foreach( @@ -404,17 +400,17 @@ init_db(ClusterNodes, Force) -> setup_existing_node(ClusterNodes, Nodes) -> DiscNodes = mnesia:table_info(schema, disc_copies), - case are_we_upgrader(DiscNodes) of - true -> - %% True single disc node, or last disc node in cluster to - %% shut down, attempt upgrade if necessary + Node = node(), + case upgrader(DiscNodes) of + Node -> + %% True single disc node, or upgrader node - attempt + %% upgrade if necessary ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local], fun () -> ok end, - fun forget_other_nodes/0) of + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - false -> + _ -> %% Subsequent node in cluster, catch up case Nodes of [AnotherNode|_] -> @@ -423,12 +419,8 @@ setup_existing_node(ClusterNodes, Nodes) -> [] -> ok end, - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], - ensure_nodes_running_fun(DiscNodes), - reset_fun(DiscNodes -- [node()])) of + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade([local]) of ok -> ok; %% If we're just starting up a new node we won't have @@ -436,13 +428,21 @@ setup_existing_node(ClusterNodes, Nodes) -> version_not_available -> ok = rabbit_upgrade:write_version() end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), ok = create_local_table_copy(schema, disc_copies), ok = create_local_table_copies(case IsDiskNode of true -> disc; false -> ram end), - ensure_schema_ok() + ensure_schema_ok(), + %% If we're just starting up a new node we won't have + %% a version + case rabbit_upgrade:read_version() of + {error, _} -> rabbit_upgrade:write_version(); + _ -> ok + end end. schema_ok_or_move() -> @@ -475,50 +475,48 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -ensure_nodes_running_fun(DiscNodes) -> - fun() -> - case nodes_running(DiscNodes) of - [] -> - exit("Cluster upgrade needed. The first node you start " - "should be the last disc node to be shut down."); +maybe_reset_for_upgrades() -> + case rabbit_upgrade:upgrade_required([mnesia]) of + true -> + DiscNodes = all_clustered_nodes(), + Upgrader = upgrader(DiscNodes), + case node() of + Upgrader -> + reset_for_primary_upgrade(DiscNodes); _ -> - ok - end + reset_for_non_primary_upgrade(Upgrader, DiscNodes) + end; + false -> + ok end. -reset_fun(OtherNodes) -> - fun() -> +reset_for_primary_upgrade(DiscNodes) -> + Others = DiscNodes -- [node()], + ensure_mnesia_running(), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], + ok. + +reset_for_non_primary_upgrade(Upgrader, DiscNodes) -> + case node_running(Upgrader) of + false -> + exit(lists:flatten( + io_lib:format( + "Cluster upgrade needed. Please start node ~s first", + [Upgrader]))); + true -> + OtherNodes = DiscNodes -- [node()], mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), + mnesia:start(), {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), ok end. -%% Were we the last node in the cluster to shut down or is there no cluster? 
-%% The answer to this is yes if: -%% * We are our canonical source for reading a table -%% - If the canonical source is "nowhere" or another node, we are out -%% of date -%% and -%% * No other nodes are running Mnesia and have finished booting Rabbit. -%% - Since any node will be its own canonical source once the cluster -%% is up, but just having Mnesia running is not enough - that node -%% could be halfway through starting (and deciding it is the upgrader -%% too) - -are_we_upgrader(Nodes) -> - Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), - Node = node(), - case {Where, nodes_running(Nodes)} of - {Node, []} -> true; - {_, _} -> false - end. - -nodes_running(Nodes) -> - [N || N <- Nodes, node_running(N)]. +upgrader(Nodes) -> + [Upgrader|_] = lists:usort(Nodes), + Upgrader. node_running(Node) -> case rpc:call(Node, application, which_applications, []) of @@ -639,6 +637,9 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. +force_tables() -> + [mnesia:force_load_table(T) || T <- table_names()]. + reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 3a78dd7f..260f85a1 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,8 @@ -module(rabbit_upgrade). --export([maybe_upgrade/3, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/1, upgrade_required/1]). +-export([read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,8 +37,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/3 :: ([scope()], fun (() -> 'ok'), fun (() -> 'ok')) - -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(upgrade_required/1 :: ([scope()]) -> boolean()). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -49,25 +50,18 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes, GuardFun, UpgradeFun) -> - case read_version() of - {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, - GuardFun, UpgradeFun, G) end); - {error, enoent} -> - version_not_available +maybe_upgrade(Scopes) -> + case upgrades_required(Scopes) of + version_not_available -> version_not_available; + [] -> ok; + Upgrades -> apply_upgrades(Upgrades) end. -maybe_upgrade_graph(CurrentHeads, Scopes, GuardFun, UpgradeFun, G) -> - case unknown_heads(CurrentHeads, G) of - [] -> - case upgrades_to_apply(CurrentHeads, Scopes, G) of - [] -> ok; - Upgrades -> apply_upgrades(Upgrades, GuardFun, UpgradeFun) - end; - Unknown -> - throw({error, {future_upgrades_found, Unknown}}) +upgrade_required(Scopes) -> + case upgrades_required(Scopes) of + version_not_available -> false; + [] -> false; + _ -> true end. 
read_version() -> @@ -85,6 +79,21 @@ desired_version() -> %% ------------------------------------------------------------------- +upgrades_required(Scopes) -> + case read_version() of + {ok, CurrentHeads} -> + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> upgrades_to_apply(CurrentHeads, Scopes, G); + Unknown -> throw({error, + {future_upgrades_found, Unknown}}) + end + end); + {error, enoent} -> + version_not_available + end. + with_upgrade_graph(Fun) -> case rabbit_misc:build_acyclic_graph( fun vertices/2, fun edges/2, @@ -133,8 +142,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> - ok = GuardFun(), +apply_upgrades(Upgrades) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -149,7 +157,6 @@ apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - ok = UpgradeFun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 0fef8fdcc755596782543d432a7103d5c7dd90fc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 12:44:43 +0000 Subject: Holy %$*! it works. Still needs tidying up somewhat... --- src/rabbit_mnesia.erl | 99 ++++++++---------------------------------------- src/rabbit_prelaunch.erl | 4 +- src/rabbit_upgrade.erl | 77 +++++++++++++++++++++++++++++++------ 3 files changed, 82 insertions(+), 98 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a11347ff..345ca82a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1]). + empty_ram_only_tables/0, copy_db/1, create_cluster_nodes_config/1]). -export([table_names/0]). @@ -94,7 +94,6 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> - ok = maybe_reset_for_upgrades(), ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), @@ -399,35 +398,19 @@ init_db(ClusterNodes, Force) -> end. 
setup_existing_node(ClusterNodes, Nodes) -> - DiscNodes = mnesia:table_info(schema, disc_copies), - Node = node(), - case upgrader(DiscNodes) of - Node -> - %% True single disc node, or upgrader node - attempt - %% upgrade if necessary + case Nodes of + [] -> + %% We're the first node up ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + case rabbit_upgrade:maybe_upgrade([local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - _ -> + [AnotherNode|_] -> %% Subsequent node in cluster, catch up - case Nodes of - [AnotherNode|_] -> - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])); - [] -> - ok - end, + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([local]) of - ok -> - ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -436,13 +419,13 @@ setup_existing_node(ClusterNodes, Nodes) -> true -> disc; false -> ram end), - ensure_schema_ok(), - %% If we're just starting up a new node we won't have - %% a version - case rabbit_upgrade:read_version() of - {error, _} -> rabbit_upgrade:write_version(); - _ -> ok - end + case rabbit_upgrade:maybe_upgrade([local]) of + ok -> ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> ok = rabbit_upgrade:write_version() + end, + ensure_schema_ok() end. schema_ok_or_move() -> @@ -475,55 +458,6 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -maybe_reset_for_upgrades() -> - case rabbit_upgrade:upgrade_required([mnesia]) of - true -> - DiscNodes = all_clustered_nodes(), - Upgrader = upgrader(DiscNodes), - case node() of - Upgrader -> - reset_for_primary_upgrade(DiscNodes); - _ -> - reset_for_non_primary_upgrade(Upgrader, DiscNodes) - end; - false -> - ok - end. - -reset_for_primary_upgrade(DiscNodes) -> - Others = DiscNodes -- [node()], - ensure_mnesia_running(), - force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], - ok. - -reset_for_non_primary_upgrade(Upgrader, DiscNodes) -> - case node_running(Upgrader) of - false -> - exit(lists:flatten( - io_lib:format( - "Cluster upgrade needed. Please start node ~s first", - [Upgrader]))); - true -> - OtherNodes = DiscNodes -- [node()], - mnesia:stop(), - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema), - mnesia:start(), - {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), - ok - end. - -upgrader(Nodes) -> - [Upgrader|_] = lists:usort(Nodes), - Upgrader. - -node_running(Node) -> - case rpc:call(Node, application, which_applications, []) of - {badrpc, _} -> false; - Apps -> lists:keysearch(rabbit, 1, Apps) =/= false - end. - create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), @@ -637,9 +571,6 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. -force_tables() -> - [mnesia:force_load_table(T) || T <- table_names()]. 
- reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8ae45abd..c5ee63ba 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -250,8 +250,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. -process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> - [{apply,{rabbit,prepare,[]}}, Entry]; +process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> + [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; process_entry(Entry) -> [Entry]. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 260f85a1..9f9e8806 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/1, upgrade_required/1]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). -export([read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -37,8 +37,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). +-spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). --spec(upgrade_required/1 :: ([scope()]) -> boolean()). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -47,9 +47,69 @@ %% ------------------------------------------------------------------- -%% Try to upgrade the schema. If no information on the existing schema -%% could be found, do nothing. rabbit_mnesia:check_schema_integrity() -%% will catch the problem. +maybe_upgrade_mnesia() -> + rabbit:prepare(), + case upgrades_required([mnesia]) of + Upgrades = [_|_] -> + DiscNodes = rabbit_mnesia:all_clustered_nodes(), + Upgrader = upgrader(DiscNodes), + case node() of + Upgrader -> + primary_upgrade(Upgrades, DiscNodes); + _ -> + non_primary_upgrade(Upgrader, DiscNodes) + end; + [] -> + ok; + version_not_available -> + ok + end. + +upgrader(Nodes) -> + [Upgrader|_] = lists:usort(Nodes), + Upgrader. + +primary_upgrade(Upgrades, DiscNodes) -> + Others = DiscNodes -- [node()], + %% TODO this should happen after backing up! + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], + apply_upgrades(Upgrades), + ok. + +force_tables() -> + [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. + +non_primary_upgrade(Upgrader, DiscNodes) -> + case node_running(Upgrader) of + false -> + Msg = "~n~n * Cluster upgrade needed. Please start node ~s " + "first. * ~n~n~n", + Args = [Upgrader], + %% We don't throw or exit here since that gets thrown + %% straight out into do_boot, generating an erl_crash.dump + %% and displaying any error message in a confusing way. + error_logger:error_msg(Msg, Args), + io:format(Msg, Args), + error_logger:logfile(close), + halt(1); + true -> + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), + ok + end. + +node_running(Node) -> + case rpc:call(Node, application, which_applications, []) of + {badrpc, _} -> false; + Apps -> lists:keysearch(rabbit, 1, Apps) =/= false + end. 
+ +%% ------------------------------------------------------------------- + maybe_upgrade(Scopes) -> case upgrades_required(Scopes) of version_not_available -> version_not_available; @@ -57,13 +117,6 @@ maybe_upgrade(Scopes) -> Upgrades -> apply_upgrades(Upgrades) end. -upgrade_required(Scopes) -> - case upgrades_required(Scopes) of - version_not_available -> false; - [] -> false; - _ -> true - end. - read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [Heads]} -> {ok, Heads}; -- cgit v1.2.1 From a153921362e59e87f5052e5ce80f765425777b59 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 13:34:02 +0000 Subject: One DAG per scope. --- src/rabbit_mnesia.erl | 4 +- src/rabbit_upgrade.erl | 82 +++++++++++++++++++++------------------- src/rabbit_upgrade_functions.erl | 4 +- 3 files changed, 48 insertions(+), 42 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 345ca82a..997b12d4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -402,7 +402,7 @@ setup_existing_node(ClusterNodes, Nodes) -> [] -> %% We're the first node up ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([local]) of + case rabbit_upgrade:maybe_upgrade(local) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -419,7 +419,7 @@ setup_existing_node(ClusterNodes, Nodes) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade([local]) of + case rabbit_upgrade:maybe_upgrade(local) of ok -> ok; %% If we're just starting up a new node we won't have %% a version diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9f9e8806..4bdff65a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -28,17 +28,18 @@ -define(VERSION_FILENAME, "schema_version"). -define(LOCK_FILENAME, "schema_upgrade_lock"). +-define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- -ifdef(use_specs). -type(step() :: atom()). --type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). +-type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). --spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: (scope()) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -49,8 +50,8 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), - case upgrades_required([mnesia]) of - Upgrades = [_|_] -> + case upgrades_required(mnesia) of + [_|_] = Upgrades -> DiscNodes = rabbit_mnesia:all_clustered_nodes(), Upgrader = upgrader(DiscNodes), case node() of @@ -72,8 +73,7 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], %% TODO this should happen after backing up! - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], apply_upgrades(Upgrades), @@ -110,8 +110,8 @@ node_running(Node) -> %% ------------------------------------------------------------------- -maybe_upgrade(Scopes) -> - case upgrades_required(Scopes) of +maybe_upgrade(Scope) -> + case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; Upgrades -> apply_upgrades(Upgrades) @@ -128,34 +128,41 @@ write_version() -> ok. 
desired_version() -> - with_upgrade_graph(fun (G) -> heads(G) end). + lists:append( + [with_upgrade_graph(fun (_, G) -> heads(G) end, Scope, []) + || Scope <- ?SCOPES]). %% ------------------------------------------------------------------- -upgrades_required(Scopes) -> +upgrades_required(Scope) -> case read_version() of {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> upgrades_to_apply(CurrentHeads, Scopes, G); - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end); + with_upgrade_graph(fun upgrades_to_apply/2, Scope, CurrentHeads); {error, enoent} -> version_not_available end. -with_upgrade_graph(Fun) -> +with_upgrade_graph(Fun, Scope, CurrentHeads) -> + G0 = make_graph(Scope), + Gs = [G0|[make_graph(S) || S <- ?SCOPES -- [Scope]]], + try + Known = lists:append([digraph:vertices(G) || G <- Gs]), + case unknown_heads(CurrentHeads, Known) of + [] -> ok; + Unknown -> throw({error, {future_upgrades_found, Unknown}}) + end, + Fun(CurrentHeads, G0) + after + [true = digraph:delete(G) || G <- Gs] + end. + +make_graph(Scope) -> case rabbit_misc:build_acyclic_graph( - fun vertices/2, fun edges/2, + fun (Module, Steps) -> vertices(Module, Steps, Scope) end, + fun (Module, Steps) -> edges(Module, Steps, Scope) end, rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> try - Fun(G) - after - true = digraph:delete(G) - end; + {ok, G} -> + G; {error, {vertex, duplicate, StepName}} -> throw({error, {duplicate_upgrade_step, StepName}}); {error, {edge, {bad_vertex, StepName}, _From, _To}} -> @@ -164,18 +171,19 @@ with_upgrade_graph(Fun) -> throw({error, {cycle_in_upgrade_steps, StepNames}}) end. -vertices(Module, Steps) -> - [{StepName, {Scope, {Module, StepName}}} || - {StepName, Scope, _Reqs} <- Steps]. +vertices(Module, Steps, Scope0) -> + [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, + Scope0 == Scope1]. -edges(_Module, Steps) -> - [{Require, StepName} || {StepName, _Scope, Requires} <- Steps, - Require <- Requires]. +edges(_Module, Steps, Scope0) -> + [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, + Require <- Requires, + Scope0 == Scope1]. -unknown_heads(Heads, G) -> - [H || H <- Heads, digraph:vertex(G, H) =:= false]. +unknown_heads(Heads, Known) -> + lists:filter(fun(H) -> not lists:member(H, Known) end, Heads). -upgrades_to_apply(Heads, Scopes, G) -> +upgrades_to_apply(Heads, G) -> %% Take all the vertices which can reach the known heads. That's %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. @@ -185,10 +193,8 @@ upgrades_to_apply(Heads, Scopes, G) -> sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. - Sorted = [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))], - %% Only return the upgrades for the appropriate scopes - [Upgrade || {Scope, Upgrade} <- Sorted, lists:member(Scope, Scopes)]. + [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. heads(G) -> lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). 
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 4068b090..b9b46f9a 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -31,8 +31,8 @@ -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). -rabbit_upgrade({one, mnesia, [user_to_internal_user]}). --rabbit_upgrade({two, local, [one]}). --rabbit_upgrade({three, mnesia, [two]}). +-rabbit_upgrade({two, mnesia, [one]}). +-rabbit_upgrade({three, local, []}). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 70c6ce665144f6d85a160e842c4cdfe543865ef4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 13:44:41 +0000 Subject: Break the cluster *after* taking the backup. --- src/rabbit_mnesia.erl | 9 +-------- src/rabbit_upgrade.erl | 19 ++++++++++++------- src/rabbit_upgrade_functions.erl | 8 ++++---- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 997b12d4..26fda4e9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -492,14 +492,7 @@ move_db() -> ok. copy_db(Destination) -> - mnesia:stop(), - case rabbit_misc:recursive_copy(dir(), Destination) of - ok -> - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = wait_for_tables(); - {error, E} -> - {error, E} - end. + rabbit_misc:recursive_copy(dir(), Destination). create_tables() -> lists:foreach(fun ({Tab, TabDef}) -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 4bdff65a..d0fdbf08 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -72,11 +72,15 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], - %% TODO this should happen after backing up! - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], - apply_upgrades(Upgrades), + apply_upgrades( + Upgrades, + fun () -> + info("Upgrades: Breaking cluster~n", []), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end), ok. force_tables() -> @@ -114,7 +118,7 @@ maybe_upgrade(Scope) -> case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Upgrades) + Upgrades -> apply_upgrades(Upgrades, fun() -> ok end) end. read_version() -> @@ -201,7 +205,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades) -> +apply_upgrades(Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -216,6 +220,7 @@ apply_upgrades(Upgrades) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index b9b46f9a..151b498d 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -110,10 +110,6 @@ one() -> [username, password_hash, is_admin, extra]). two() -> - ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), - [test]). 
- -three() -> mnesia( rabbit_user, fun ({internal_user, Username, Hash, IsAdmin, _}) -> @@ -121,6 +117,10 @@ three() -> end, [username, password_hash, is_admin]). +three() -> + ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), + [test]). + %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 31cef377dff7bdfce6bff9b802ad0dd22d3341a1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:03:29 +0000 Subject: Store the version as an orddict keyed on different scopes, and thus don't assert that everything is done after the first upgrade. --- src/rabbit_upgrade.erl | 98 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 35 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d0fdbf08..9d6263fe 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -35,7 +35,7 @@ -ifdef(use_specs). -type(step() :: atom()). --type(version() :: [step()]). +-type(version() :: [{scope(), [step()]}]). -type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). @@ -73,9 +73,10 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], apply_upgrades( + mnesia, Upgrades, fun () -> - info("Upgrades: Breaking cluster~n", []), + info("mnesia upgrades: Breaking cluster~n", []), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), [{atomic, ok} = mnesia:del_table_copy(schema, Node) @@ -118,55 +119,80 @@ maybe_upgrade(Scope) -> case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Upgrades, fun() -> ok end) + Upgrades -> apply_upgrades(Scope, Upgrades, + fun() -> ok end) end. read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [Heads]} -> {ok, Heads}; + {ok, [V]} -> case orddict:find(mnesia, V) of + error -> {ok, convert_old_version(V)}; + _ -> {ok, V} + end; {error, _} = Err -> Err end. +read_version(Scope) -> + case read_version() of + {error, _} = E -> E; + {ok, V} -> {ok, orddict:fetch(Scope, V)} + end. + write_version() -> ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), ok. +write_version(Scope) -> + {ok, V0} = read_version(), + V = orddict:store(Scope, desired_version(Scope), V0), + ok = rabbit_misc:write_term_file(schema_filename(), [V]), + ok. + desired_version() -> - lists:append( - [with_upgrade_graph(fun (_, G) -> heads(G) end, Scope, []) - || Scope <- ?SCOPES]). + lists:foldl( + fun (Scope, Acc) -> + orddict:store(Scope, desired_version(Scope), Acc) + end, + orddict:new(), ?SCOPES). + +desired_version(Scope) -> + with_upgrade_graph(fun (G) -> heads(G) end, Scope). + +convert_old_version(Heads) -> + Locals = [add_queue_ttl], + V0 = orddict:new(), + V1 = orddict:store(mnesia, Heads -- Locals, V0), + orddict:store(local, + lists:filter(fun(H) -> lists:member(H, Locals) end, Heads), + V1). %% ------------------------------------------------------------------- upgrades_required(Scope) -> - case read_version() of + case read_version(Scope) of {ok, CurrentHeads} -> - with_upgrade_graph(fun upgrades_to_apply/2, Scope, CurrentHeads); + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> upgrades_to_apply(CurrentHeads, G); + Unknown -> throw({error, + {future_upgrades_found, Unknown}}) + end + end, Scope); {error, enoent} -> version_not_available end. 
-with_upgrade_graph(Fun, Scope, CurrentHeads) -> - G0 = make_graph(Scope), - Gs = [G0|[make_graph(S) || S <- ?SCOPES -- [Scope]]], - try - Known = lists:append([digraph:vertices(G) || G <- Gs]), - case unknown_heads(CurrentHeads, Known) of - [] -> ok; - Unknown -> throw({error, {future_upgrades_found, Unknown}}) - end, - Fun(CurrentHeads, G0) - after - [true = digraph:delete(G) || G <- Gs] - end. - -make_graph(Scope) -> +with_upgrade_graph(Fun, Scope) -> case rabbit_misc:build_acyclic_graph( fun (Module, Steps) -> vertices(Module, Steps, Scope) end, fun (Module, Steps) -> edges(Module, Steps, Scope) end, rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> - G; + {ok, G} -> try + Fun(G) + after + true = digraph:delete(G) + end; {error, {vertex, duplicate, StepName}} -> throw({error, {duplicate_upgrade_step, StepName}}); {error, {edge, {bad_vertex, StepName}, _From, _To}} -> @@ -205,12 +231,12 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, Fun) -> +apply_upgrades(Scope, Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> BackupDir = dir() ++ "-upgrade-backup", - info("Upgrades: ~w to apply~n", [length(Upgrades)]), + info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), case rabbit_mnesia:copy_db(BackupDir) of ok -> %% We need to make the backup after creating the @@ -219,13 +245,15 @@ apply_upgrades(Upgrades, Fun) -> %% the lock file exists in the backup too, which %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), - info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + info("~s upgrades: Mnesia dir backed up to ~p~n", + [Scope, BackupDir]), Fun(), - [apply_upgrade(Upgrade) || Upgrade <- Upgrades], - info("Upgrades: All upgrades applied successfully~n", []), - ok = write_version(), + [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], + info("~s upgrades: All upgrades applied successfully~n", + [Scope]), + ok = write_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), - info("Upgrades: Mnesia backup removed~n", []), + info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); {error, E} -> %% If we can't backup, the upgrade hasn't started @@ -238,8 +266,8 @@ apply_upgrades(Upgrades, Fun) -> throw({error, previous_upgrade_failed}) end. -apply_upgrade({M, F}) -> - info("Upgrades: Applying ~w:~w~n", [M, F]), +apply_upgrade(Scope, {M, F}) -> + info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), ok = apply(M, F, []). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 2a43f15e16c5ab4c47827efc6e361b3badc69fba Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:09:19 +0000 Subject: Note that we've upgraded here --- src/rabbit_upgrade.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9d6263fe..9ce9b385 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -104,6 +104,7 @@ non_primary_upgrade(Upgrader, DiscNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), + write_version(mnesia), ok end. 
-- cgit v1.2.1 From 8d1365c898057bec83c45201d99c7ca8d5815e3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:11:44 +0000 Subject: Remove test upgrades --- src/rabbit_upgrade_functions.erl | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 151b498d..d2ef31b9 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -30,10 +30,6 @@ -rabbit_upgrade({internal_exchanges, mnesia, []}). -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). --rabbit_upgrade({one, mnesia, [user_to_internal_user]}). --rabbit_upgrade({two, mnesia, [one]}). --rabbit_upgrade({three, local, []}). - %% ------------------------------------------------------------------- -ifdef(use_specs). @@ -99,28 +95,6 @@ user_to_internal_user() -> end, [username, password_hash, is_admin], internal_user). - - -one() -> - mnesia( - rabbit_user, - fun ({internal_user, Username, Hash, IsAdmin}) -> - {internal_user, Username, Hash, IsAdmin, foo} - end, - [username, password_hash, is_admin, extra]). - -two() -> - mnesia( - rabbit_user, - fun ({internal_user, Username, Hash, IsAdmin, _}) -> - {internal_user, Username, Hash, IsAdmin} - end, - [username, password_hash, is_admin]). - -three() -> - ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), - [test]). - %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From fc4e251b01f64cc28a30bf902eb36ab68e144aaa Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:25:28 +0000 Subject: Minimise difference with default. --- src/rabbit_mnesia.erl | 61 +++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 26fda4e9..e63e5de2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -387,8 +387,34 @@ init_db(ClusterNodes, Force) -> {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {_, _} -> - ok = setup_existing_node(ClusterNodes, Nodes) + {[], _} -> + %% We're the first node up + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade(local) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + {[AnotherNode|_], _} -> + %% Subsequent node in cluster, catch up + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + ok = wait_for_tables(), + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + case rabbit_upgrade:maybe_upgrade(local) of + ok -> ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ensure_schema_ok() end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -397,37 +423,6 @@ init_db(ClusterNodes, Force) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. 
-setup_existing_node(ClusterNodes, Nodes) -> - case Nodes of - [] -> - %% We're the first node up - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade(local) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - [AnotherNode|_] -> - %% Subsequent node in cluster, catch up - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - ok = wait_for_tables(), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - case rabbit_upgrade:maybe_upgrade(local) of - ok -> ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> ok = rabbit_upgrade:write_version() - end, - ensure_schema_ok() - end. - schema_ok_or_move() -> case check_schema_integrity() of ok -> -- cgit v1.2.1 From 49174fa2a3610b6158fe70744935b0bf885a1e9e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 17:03:56 +0000 Subject: Revert this to the old version that we want. --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9ce9b385..23dd416a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -211,8 +211,8 @@ edges(_Module, Steps, Scope0) -> Require <- Requires, Scope0 == Scope1]. -unknown_heads(Heads, Known) -> - lists:filter(fun(H) -> not lists:member(H, Known) end, Heads). +unknown_heads(Heads, G) -> + [H || H <- Heads, digraph:vertex(G, H) =:= false]. upgrades_to_apply(Heads, G) -> %% Take all the vertices which can reach the known heads. That's -- cgit v1.2.1 From 02c2dd6844e132b64abef90ecfdf01c9e9124d8d Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 12 Jan 2011 11:59:09 +0000 Subject: swap union and intersection --- src/rabbit_variable_queue.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index c678236f..07297f63 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1423,8 +1423,8 @@ msgs_written_to_disk(QPid, GuidSet, written) -> msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), State #vqstate { msgs_on_disk = - gb_sets:intersection( - gb_sets:union(MOD, GuidSet), UC) }) + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) end). msg_indices_written_to_disk(QPid, GuidSet) -> @@ -1435,8 +1435,8 @@ msg_indices_written_to_disk(QPid, GuidSet) -> msgs_confirmed(gb_sets:intersection(GuidSet, MOD), State #vqstate { msg_indices_on_disk = - gb_sets:intersection( - gb_sets:union(MIOD, GuidSet), UC) }) + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) end). %%---------------------------------------------------------------------------- -- cgit v1.2.1 From d9235728acd857cea1240ab84f64dfa16bfdff54 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:01:09 +0000 Subject: rabbit_mnesia:all_clustered_nodes/0 does not return disc nodes only. Duh. But we can do better anyway: allow any disc node to do the upgrade. 
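Editor's note (illustrative only, not part of the patch): the rule this change moves to can be summarised as "any disc node may perform the mnesia upgrade, provided no other cluster node is already running; otherwise follow whoever is". A minimal self-contained Erlang sketch of that decision, with the running-node list and disc-node flag passed in as plain arguments rather than read from rabbit_mnesia as the real code does:

    -module(upgrader_choice_sketch).
    -export([may_upgrade/2]).

    %% RunningNodes: cluster nodes currently running rabbit (the real code
    %% discovers these via rpc); IsDiscNode: whether this node keeps its
    %% schema on disc.
    may_upgrade([], true)  -> yes;                    %% first disc node up: do the upgrade
    may_upgrade([], false) -> {no, ram_node};         %% a disc node has to go first
    may_upgrade([_|_], _)  -> {no, already_running}.  %% someone else is up; catch up instead
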
--- src/rabbit_mnesia.erl | 5 ++- src/rabbit_upgrade.erl | 99 +++++++++++++++++++++++++++++++++----------------- 2 files changed, 70 insertions(+), 34 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e63e5de2..47e68c87 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,8 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1, create_cluster_nodes_config/1]). + empty_ram_only_tables/0, copy_db/1, + create_cluster_nodes_config/1, read_cluster_nodes_config/0]). -export([table_names/0]). @@ -71,6 +72,8 @@ -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). +-spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). +-spec(read_cluster_nodes_config/0 :: () -> [node()]). -endif. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 23dd416a..dcbffd03 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -22,7 +22,8 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). --export([read_version/0, write_version/0, desired_version/0]). +-export([read_version/0, write_version/0, desired_version/0, + desired_version/1]). -include("rabbit.hrl"). @@ -43,6 +44,7 @@ -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). +-spec(desired_version/1 :: (scope()) -> [step()]). -endif. @@ -52,13 +54,10 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), case upgrades_required(mnesia) of [_|_] = Upgrades -> - DiscNodes = rabbit_mnesia:all_clustered_nodes(), - Upgrader = upgrader(DiscNodes), - case node() of - Upgrader -> - primary_upgrade(Upgrades, DiscNodes); - _ -> - non_primary_upgrade(Upgrader, DiscNodes) + Nodes = rabbit_mnesia:all_clustered_nodes(), + case am_i_upgrader(Nodes) of + true -> primary_upgrade(Upgrades, Nodes); + false -> non_primary_upgrade(Nodes) end; [] -> ok; @@ -66,12 +65,57 @@ maybe_upgrade_mnesia() -> ok end. -upgrader(Nodes) -> - [Upgrader|_] = lists:usort(Nodes), - Upgrader. +am_i_upgrader(Nodes) -> + Running = nodes_running(Nodes), + case Running of + [] -> + case am_i_disc_node() of + true -> + true; + false -> + die("Cluster upgrade needed but this is a ram node.~n " + "Please start any of the disc nodes first.", []) + end; + [Another|_] -> + ClusterVersion = + case rpc:call(Another, + rabbit_upgrade, desired_version, [mnesia]) of + {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; + {badrpc, Reason} -> {unknown, Reason}; + V -> V + end, + case desired_version(mnesia) of + ClusterVersion -> + %% The other node(s) have upgraded already, I am not the + %% upgrader + false; + MyVersion -> + %% The other node(s) are running an unexpected version. + die("Cluster upgrade needed but other nodes are " + "running ~p~n" + "and I want ~p", [ClusterVersion, MyVersion]) + end + end. + +am_i_disc_node() -> + %% The cluster config does not list all disc nodes, but it will list us + %% if we're one. + case rabbit_mnesia:read_cluster_nodes_config() of + [] -> true; + DiscNodes -> lists:member(node(), DiscNodes) + end. 
-primary_upgrade(Upgrades, DiscNodes) -> - Others = DiscNodes -- [node()], +die(Msg, Args) -> + %% We don't throw or exit here since that gets thrown + %% straight out into do_boot, generating an erl_crash.dump + %% and displaying any error message in a confusing way. + error_logger:error_msg(Msg, Args), + io:format("~n~n** " ++ Msg ++ " **~n~n~n", Args), + error_logger:logfile(close), + halt(1). + +primary_upgrade(Upgrades, Nodes) -> + Others = Nodes -- [node()], apply_upgrades( mnesia, Upgrades, @@ -87,26 +131,15 @@ primary_upgrade(Upgrades, DiscNodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -non_primary_upgrade(Upgrader, DiscNodes) -> - case node_running(Upgrader) of - false -> - Msg = "~n~n * Cluster upgrade needed. Please start node ~s " - "first. * ~n~n~n", - Args = [Upgrader], - %% We don't throw or exit here since that gets thrown - %% straight out into do_boot, generating an erl_crash.dump - %% and displaying any error message in a confusing way. - error_logger:error_msg(Msg, Args), - io:format(Msg, Args), - error_logger:logfile(close), - halt(1); - true -> - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), - write_version(mnesia), - ok - end. +non_primary_upgrade(Nodes) -> + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + ok = rabbit_mnesia:create_cluster_nodes_config(Nodes), + write_version(mnesia), + ok. + +nodes_running(Nodes) -> + [N || N <- Nodes, node_running(N)]. node_running(Node) -> case rpc:call(Node, application, which_applications, []) of -- cgit v1.2.1 From 81fd88b601cfa099f052f4270317248c6f870e72 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:43:25 +0000 Subject: Remove pointless differences from default. --- src/rabbit_mnesia.erl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47e68c87..6523a036 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -44,8 +44,6 @@ %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). --define(EXAMPLE_RABBIT_TABLE, rabbit_durable_exchange). - -include("rabbit.hrl"). %%---------------------------------------------------------------------------- @@ -164,7 +162,7 @@ nodes_of_type(Type) -> %% Specifically, we check whether a certain table, which we know %% will be written to disk on a disc node, is stored on disk or in %% RAM. - mnesia:table_info(?EXAMPLE_RABBIT_TABLE, Type). + mnesia:table_info(rabbit_durable_exchange, Type). table_definitions() -> [{rabbit_user, @@ -401,7 +399,6 @@ init_db(ClusterNodes, Force) -> %% Subsequent node in cluster, catch up ensure_version_ok( rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - ok = wait_for_tables(), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), -- cgit v1.2.1 From d19649eec7e0eb34fbf16b906d36a713b9737c5b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:54:12 +0000 Subject: Detect old-style versions properly. 
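Editor's note (illustrative sketch, not part of the patch): the point of this change is to tell a pre-orddict version file, which held a flat list of step atoms, apart from the new per-scope orddict form. The sample values below are made up for the example:

    -module(version_format_sketch).
    -export([demo/0]).

    %% New-style versions are non-empty lists of {Scope, Steps} pairs.
    is_new_version(V) ->
        is_list(V) andalso V =/= [] andalso
            lists:all(fun (E) -> is_tuple(E) andalso size(E) =:= 2 end, V).

    demo() ->
        Old = [hash_passwords, add_queue_ttl],                        %% flat list of steps
        New = [{local, [add_queue_ttl]}, {mnesia, [hash_passwords]}],
        {is_new_version(Old), is_new_version(New)}.                   %% => {false, true}
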
--- src/rabbit_upgrade.erl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dcbffd03..a570df4a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -159,9 +159,9 @@ maybe_upgrade(Scope) -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case orddict:find(mnesia, V) of - error -> {ok, convert_old_version(V)}; - _ -> {ok, V} + {ok, [V]} -> case is_orddict(V) of + false -> {ok, convert_old_version(V)}; + true -> {ok, V} end; {error, _} = Err -> Err end. @@ -315,3 +315,9 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). + +%% This doesn't check it's ordered but that's not needed for our purposes +is_orddict(Thing) -> + is_list(Thing) andalso + lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, + Thing). -- cgit v1.2.1 From 87e8c18729974033ecef50a6b91b336e04189a15 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 13:05:53 +0000 Subject: And fix again. --- src/rabbit_upgrade.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index a570df4a..4bf8d661 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -159,7 +159,7 @@ maybe_upgrade(Scope) -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case is_orddict(V) of + {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; true -> {ok, V} end; @@ -316,8 +316,8 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). -%% This doesn't check it's ordered but that's not needed for our purposes -is_orddict(Thing) -> - is_list(Thing) andalso +is_new_version(Version) -> + is_list(Version) andalso + length(Version) > 0 andalso lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, - Thing). + Version). -- cgit v1.2.1 From 3821445acd31339a98af2ab0508f092ec06332d2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 13:31:41 +0000 Subject: Don't display a cluster-related message on a single node. --- src/rabbit_upgrade.erl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 4bf8d661..2c4dad87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -120,11 +120,16 @@ primary_upgrade(Upgrades, Nodes) -> mnesia, Upgrades, fun () -> - info("mnesia upgrades: Breaking cluster~n", []), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] + case Others of + [] -> + ok; + _ -> + info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end end), ok. -- cgit v1.2.1 From e45219e2eea0ef94646518a122dedf6f39fadc2f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 14:03:49 +0000 Subject: Break the bad news rather than just timing out wait_for_tables as we traditionally have done. 
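Editor's note (illustrative only, not part of the patch): instead of letting wait_for_tables later time out with an opaque error, the boot-time check now reports the unsupported case and halts cleanly. A simplified sketch of the decision being added, with the message text shortened for the example:

    -module(bad_news_sketch).
    -export([check_upgrade_feasible/2]).

    %% version_not_available means the schema_version file predates 2.1.1;
    %% that is only recoverable on a standalone node.
    check_upgrade_feasible(version_not_available, [_SingleNode]) ->
        ok;
    check_upgrade_feasible(version_not_available, _ClusterNodes) ->
        {die, "Cluster upgrade needed but upgrading from < 2.1.1; "
              "the cluster will need to be rebuilt"};
    check_upgrade_feasible(_UpgradesRequired, _Nodes) ->
        ok.
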
--- src/rabbit_upgrade.erl | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 2c4dad87..53ed99d3 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -52,9 +52,9 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), + Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of [_|_] = Upgrades -> - Nodes = rabbit_mnesia:all_clustered_nodes(), case am_i_upgrader(Nodes) of true -> primary_upgrade(Upgrades, Nodes); false -> non_primary_upgrade(Nodes) @@ -62,7 +62,15 @@ maybe_upgrade_mnesia() -> [] -> ok; version_not_available -> - ok + case Nodes of + [_] -> + ok; + _ -> + die("Cluster upgrade needed but upgrading from < 2.1.1.~n" + " Unfortunately you will need to rebuild the " + "cluster.", + []) + end end. am_i_upgrader(Nodes) -> -- cgit v1.2.1 From 90d3914c6aab0b510c42000d00615d5c51ec4345 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 14:43:32 +0000 Subject: Cosmetic. --- src/rabbit_mnesia.erl | 3 ++- src/rabbit_upgrade.erl | 34 ++++++++++++++-------------------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 6523a036..ee6ede35 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -408,7 +408,8 @@ init_db(ClusterNodes, Force) -> false -> ram end), case rabbit_upgrade:maybe_upgrade(local) of - ok -> ok; + ok -> + ok; %% If we're just starting up a new node we won't have %% a version version_not_available -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 53ed99d3..6df881fd 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -63,13 +63,10 @@ maybe_upgrade_mnesia() -> ok; version_not_available -> case Nodes of - [_] -> - ok; - _ -> - die("Cluster upgrade needed but upgrading from < 2.1.1.~n" - " Unfortunately you will need to rebuild the " - "cluster.", - []) + [_] -> ok; + _ -> die("Cluster upgrade needed but upgrading from " + "< 2.1.1.~n Unfortunately you will need to " + "rebuild the cluster.", []) end end. @@ -78,11 +75,10 @@ am_i_upgrader(Nodes) -> case Running of [] -> case am_i_disc_node() of - true -> - true; - false -> - die("Cluster upgrade needed but this is a ram node.~n " - "Please start any of the disc nodes first.", []) + true -> true; + false -> die("Cluster upgrade needed but this is a ram " + "node.~n Please start any of the disc nodes " + "first.", []) end; [Another|_] -> ClusterVersion = @@ -100,8 +96,8 @@ am_i_upgrader(Nodes) -> MyVersion -> %% The other node(s) are running an unexpected version. die("Cluster upgrade needed but other nodes are " - "running ~p~n" - "and I want ~p", [ClusterVersion, MyVersion]) + "running ~p~nand I want ~p", + [ClusterVersion, MyVersion]) end end. @@ -131,12 +127,10 @@ primary_upgrade(Upgrades, Nodes) -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), case Others of - [] -> - ok; - _ -> - info("mnesia upgrades: Breaking cluster~n", []), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] + [] -> ok; + _ -> info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] end end), ok. -- cgit v1.2.1 From cdac52bb3fd241ebada25b21841ca1d4a26dbe23 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 18 Jan 2011 14:07:48 +0000 Subject: Satisfying bit first: remove rabbitmq-multi and all trivial references. 
--- Makefile | 4 +- docs/rabbitmq-multi.1.xml | 100 ----------------------------- docs/rabbitmq-server.1.xml | 1 - docs/rabbitmq.conf.5.xml | 1 - packaging/RPMS/Fedora/rabbitmq-server.spec | 1 - packaging/debs/Debian/debian/rules | 2 +- packaging/macports/Portfile.in | 13 ++-- packaging/windows/Makefile | 1 - scripts/rabbitmq-multi | 87 ------------------------- scripts/rabbitmq-multi.bat | 99 ---------------------------- 10 files changed, 8 insertions(+), 301 deletions(-) delete mode 100644 docs/rabbitmq-multi.1.xml delete mode 100755 scripts/rabbitmq-multi delete mode 100644 scripts/rabbitmq-multi.bat diff --git a/Makefile b/Makefile index 00bfd629..b7171651 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) WEB_URL=http://www.rabbitmq.com/ MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml)) WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml) -USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-multi.1.xml +USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML))) ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes) @@ -270,7 +270,7 @@ install_bin: all install_dirs cp -r ebin include LICENSE LICENSE-MPL-RabbitMQ INSTALL $(TARGET_DIR) chmod 0755 scripts/* - for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-multi; do \ + for script in rabbitmq-env rabbitmq-server rabbitmqctl; do \ cp scripts/$$script $(TARGET_DIR)/sbin; \ [ -e $(SBIN_DIR)/$$script ] || ln -s $(SCRIPTS_REL_PATH)/$$script $(SBIN_DIR)/$$script; \ done diff --git a/docs/rabbitmq-multi.1.xml b/docs/rabbitmq-multi.1.xml deleted file mode 100644 index 6586890a..00000000 --- a/docs/rabbitmq-multi.1.xml +++ /dev/null @@ -1,100 +0,0 @@ - - - - - RabbitMQ Server - - The RabbitMQ Team <info@rabbitmq.com> - - - - - rabbitmq-multi - 1 - RabbitMQ Server - - - - rabbitmq-multi - start/stop local cluster RabbitMQ nodes - - - - - rabbitmq-multi - command - command options - - - - - Description - - RabbitMQ is an implementation of AMQP, the emerging standard for high -performance enterprise messaging. The RabbitMQ server is a robust and -scalable implementation of an AMQP broker. - - -rabbitmq-multi scripts allows for easy set-up of a cluster on a single -machine. - - - - - Commands - - - start_all count - - -Start count nodes with unique names, listening on all IP addresses and -on sequential ports starting from 5672. - - For example: - rabbitmq-multi start_all 3 - - Starts 3 local RabbitMQ nodes with unique, sequential port numbers. - - - - - - status - - -Print the status of all running RabbitMQ nodes. - - - - - - stop_all - - -Stop all local RabbitMQ nodes, - - - - - - rotate_logs - - -Rotate log files for all local and running RabbitMQ nodes. - - - - - - - - - - See also - - rabbitmq.conf5 - rabbitmq-server1 - rabbitmqctl1 - - - diff --git a/docs/rabbitmq-server.1.xml b/docs/rabbitmq-server.1.xml index 687a9c39..0fb3c48f 100644 --- a/docs/rabbitmq-server.1.xml +++ b/docs/rabbitmq-server.1.xml @@ -124,7 +124,6 @@ Defaults to 5672. 
See also rabbitmq.conf5 - rabbitmq-multi1 rabbitmqctl1 diff --git a/docs/rabbitmq.conf.5.xml b/docs/rabbitmq.conf.5.xml index 31de7164..8b95e55c 100644 --- a/docs/rabbitmq.conf.5.xml +++ b/docs/rabbitmq.conf.5.xml @@ -76,7 +76,6 @@ NODENAME=hare See also - rabbitmq-multi1 rabbitmq-server1 rabbitmqctl1 diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index b37f7ab1..e3383ea6 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -55,7 +55,6 @@ mkdir -p %{buildroot}%{_localstatedir}/log/rabbitmq install -p -D -m 0755 %{S:1} %{buildroot}%{_initrddir}/rabbitmq-server install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmqctl install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-server -install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-multi install -p -D -m 0755 %{_rabbit_server_ocf} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server install -p -D -m 0644 %{S:3} %{buildroot}%{_sysconfdir}/logrotate.d/rabbitmq-server diff --git a/packaging/debs/Debian/debian/rules b/packaging/debs/Debian/debian/rules index 6b6df33b..a785b292 100644 --- a/packaging/debs/Debian/debian/rules +++ b/packaging/debs/Debian/debian/rules @@ -14,7 +14,7 @@ DOCDIR=$(DEB_DESTDIR)usr/share/doc/rabbitmq-server/ install/rabbitmq-server:: mkdir -p $(DOCDIR) rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL* - for script in rabbitmqctl rabbitmq-server rabbitmq-multi; do \ + for script in rabbitmqctl rabbitmq-server; do \ install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \ done sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' debian/postrm diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index f8417b83..96c82670 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -85,24 +85,21 @@ post-destroot { ${realsbin}/rabbitmq-env foreach var {CONFIG_FILE LOG_BASE MNESIA_BASE PIDS_FILE} { reinplace -E "s:^($var)=/:\\1=${prefix}/:" \ - ${realsbin}/rabbitmq-multi \ ${realsbin}/rabbitmq-server \ ${realsbin}/rabbitmqctl } xinstall -m 555 ${filespath}/rabbitmq-script-wrapper \ - ${wrappersbin}/rabbitmq-multi + ${wrappersbin}/rabbitmq-server reinplace -E "s:MACPORTS_PREFIX/bin:${prefix}/bin:" \ - ${wrappersbin}/rabbitmq-multi + ${wrappersbin}/rabbitmq-server reinplace -E "s:/usr/lib/rabbitmq/bin/:${prefix}/lib/rabbitmq/bin/:" \ - ${wrappersbin}/rabbitmq-multi + ${wrappersbin}/rabbitmq-server reinplace -E "s:/var/lib/rabbitmq:${prefix}/var/lib/rabbitmq:" \ - ${wrappersbin}/rabbitmq-multi - file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmq-server - file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmqctl + ${wrappersbin}/rabbitmq-server + file copy ${wrappersbin}/rabbitmq-server ${wrappersbin}/rabbitmqctl - file copy ${mansrc}/man1/rabbitmq-multi.1.gz ${mandest}/man1/ file copy ${mansrc}/man1/rabbitmq-server.1.gz ${mandest}/man1/ file copy ${mansrc}/man1/rabbitmqctl.1.gz ${mandest}/man1/ file copy ${mansrc}/man5/rabbitmq.conf.5.gz ${mandest}/man5/ diff --git a/packaging/windows/Makefile b/packaging/windows/Makefile index abe174e0..dacfa620 100644 --- a/packaging/windows/Makefile +++ b/packaging/windows/Makefile @@ -11,7 +11,6 @@ dist: mv $(SOURCE_DIR)/scripts/rabbitmq-server.bat $(SOURCE_DIR)/sbin mv $(SOURCE_DIR)/scripts/rabbitmq-service.bat $(SOURCE_DIR)/sbin mv $(SOURCE_DIR)/scripts/rabbitmqctl.bat 
$(SOURCE_DIR)/sbin - mv $(SOURCE_DIR)/scripts/rabbitmq-multi.bat $(SOURCE_DIR)/sbin rm -rf $(SOURCE_DIR)/scripts rm -rf $(SOURCE_DIR)/codegen* $(SOURCE_DIR)/Makefile rm -f $(SOURCE_DIR)/README diff --git a/scripts/rabbitmq-multi b/scripts/rabbitmq-multi deleted file mode 100755 index 33883702..00000000 --- a/scripts/rabbitmq-multi +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/sh -## The contents of this file are subject to the Mozilla Public License -## Version 1.1 (the "License"); you may not use this file except in -## compliance with the License. You may obtain a copy of the License at -## http://www.mozilla.org/MPL/ -## -## Software distributed under the License is distributed on an "AS IS" -## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -## License for the specific language governing rights and limitations -## under the License. -## -## The Original Code is RabbitMQ. -## -## The Initial Developers of the Original Code are LShift Ltd, -## Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -## Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -## are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -## Technologies LLC, and Rabbit Technologies Ltd. -## -## Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -## Ltd. Portions created by Cohesive Financial Technologies LLC are -## Copyright (C) 2007-2010 Cohesive Financial Technologies -## LLC. Portions created by Rabbit Technologies Ltd are Copyright -## (C) 2007-2010 Rabbit Technologies Ltd. -## -## All Rights Reserved. -## -## Contributor(s): ______________________________________. -## - -SCRIPT_HOME=$(dirname $0) -PIDS_FILE=/var/lib/rabbitmq/pids -MULTI_ERL_ARGS= -MULTI_START_ARGS= -CONFIG_FILE=/etc/rabbitmq/rabbitmq - -. `dirname $0`/rabbitmq-env - -DEFAULT_NODE_IP_ADDRESS=0.0.0.0 -DEFAULT_NODE_PORT=5672 -[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS} -[ "x" = "x$RABBITMQ_NODE_PORT" ] && [ "x" != "x$NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT} -if [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] -then - if [ "x" != "x$RABBITMQ_NODE_PORT" ] - then RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS} - fi -else - if [ "x" = "x$RABBITMQ_NODE_PORT" ] - then RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT} - fi -fi -[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME} -[ "x" = "x$RABBITMQ_SCRIPT_HOME" ] && RABBITMQ_SCRIPT_HOME=${SCRIPT_HOME} -[ "x" = "x$RABBITMQ_PIDS_FILE" ] && RABBITMQ_PIDS_FILE=${PIDS_FILE} -[ "x" = "x$RABBITMQ_MULTI_ERL_ARGS" ] && RABBITMQ_MULTI_ERL_ARGS=${MULTI_ERL_ARGS} -[ "x" = "x$RABBITMQ_MULTI_START_ARGS" ] && RABBITMQ_MULTI_START_ARGS=${MULTI_START_ARGS} -[ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE} - -export \ - RABBITMQ_NODENAME \ - RABBITMQ_NODE_IP_ADDRESS \ - RABBITMQ_NODE_PORT \ - RABBITMQ_SCRIPT_HOME \ - RABBITMQ_PIDS_FILE \ - RABBITMQ_CONFIG_FILE - -RABBITMQ_CONFIG_ARG= -[ -f "${RABBITMQ_CONFIG_FILE}.config" ] && RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}" - -# we need to turn off path expansion because some of the vars, notably -# RABBITMQ_MULTI_ERL_ARGS, may contain terms that look like globs and -# there is no other way of preventing their expansion. 
-set -f - -exec erl \ - -pa "${RABBITMQ_HOME}/ebin" \ - -noinput \ - -hidden \ - ${RABBITMQ_MULTI_ERL_ARGS} \ - -sname rabbitmq_multi$$ \ - ${RABBITMQ_CONFIG_ARG} \ - -s rabbit_multi \ - ${RABBITMQ_MULTI_START_ARGS} \ - -extra "$@" diff --git a/scripts/rabbitmq-multi.bat b/scripts/rabbitmq-multi.bat deleted file mode 100644 index ec61dc99..00000000 --- a/scripts/rabbitmq-multi.bat +++ /dev/null @@ -1,99 +0,0 @@ -@echo off -REM The contents of this file are subject to the Mozilla Public License -REM Version 1.1 (the "License"); you may not use this file except in -REM compliance with the License. You may obtain a copy of the License at -REM http://www.mozilla.org/MPL/ -REM -REM Software distributed under the License is distributed on an "AS IS" -REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -REM License for the specific language governing rights and limitations -REM under the License. -REM -REM The Original Code is RabbitMQ. -REM -REM The Initial Developers of the Original Code are LShift Ltd, -REM Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -REM Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -REM are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -REM Technologies LLC, and Rabbit Technologies Ltd. -REM -REM Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -REM Ltd. Portions created by Cohesive Financial Technologies LLC are -REM Copyright (C) 2007-2010 Cohesive Financial Technologies -REM LLC. Portions created by Rabbit Technologies Ltd are Copyright -REM (C) 2007-2010 Rabbit Technologies Ltd. -REM -REM All Rights Reserved. -REM -REM Contributor(s): ______________________________________. -REM - -setlocal - -rem Preserve values that might contain exclamation marks before -rem enabling delayed expansion -set TDP0=%~dp0 -set STAR=%* -setlocal enabledelayedexpansion - -if "!RABBITMQ_BASE!"=="" ( - set RABBITMQ_BASE=!APPDATA!\RabbitMQ -) - -if "!COMPUTERNAME!"=="" ( - set COMPUTERNAME=localhost -) - -if "!RABBITMQ_NODENAME!"=="" ( - set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME! -) - -if "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( - if not "!RABBITMQ_NODE_PORT!"=="" ( - set RABBITMQ_NODE_IP_ADDRESS=0.0.0.0 - ) -) else ( - if "!RABBITMQ_NODE_PORT!"=="" ( - set RABBITMQ_NODE_PORT=5672 - ) -) - -set RABBITMQ_PIDS_FILE=!RABBITMQ_BASE!\rabbitmq.pids -set RABBITMQ_SCRIPT_HOME=!TDP0! - -if "!RABBITMQ_CONFIG_FILE!"=="" ( - set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq -) - -if exist "!RABBITMQ_CONFIG_FILE!.config" ( - set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!" -) else ( - set RABBITMQ_CONFIG_ARG= -) - -if not exist "!ERLANG_HOME!\bin\erl.exe" ( - echo. - echo ****************************** - echo ERLANG_HOME not set correctly. - echo ****************************** - echo. - echo Please either set ERLANG_HOME to point to your Erlang installation or place the - echo RabbitMQ server distribution in the Erlang lib folder. - echo. - exit /B -) - -"!ERLANG_HOME!\bin\erl.exe" ^ --pa "!TDP0!..\ebin" ^ --noinput -hidden ^ -!RABBITMQ_MULTI_ERL_ARGS! ^ --sname rabbitmq_multi!RANDOM! ^ -!RABBITMQ_CONFIG_ARG! ^ --s rabbit_multi ^ -!RABBITMQ_MULTI_START_ARGS! ^ --extra !STAR! 
- -endlocal -endlocal -- cgit v1.2.1 From 4a7803245612862f0eaa34597affb4ed1bcbbc77 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 18 Jan 2011 14:18:36 +0000 Subject: Er, and this too, of course :) --- src/rabbit_multi.erl | 362 --------------------------------------------------- 1 file changed, 362 deletions(-) delete mode 100644 src/rabbit_multi.erl diff --git a/src/rabbit_multi.erl b/src/rabbit_multi.erl deleted file mode 100644 index 0030216e..00000000 --- a/src/rabbit_multi.erl +++ /dev/null @@ -1,362 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. -%% - --module(rabbit_multi). --include("rabbit.hrl"). - --export([start/0, stop/0]). - --define(RPC_SLEEP, 500). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --spec(start/0 :: () -> no_return()). --spec(stop/0 :: () -> 'ok'). --spec(usage/0 :: () -> no_return()). - --endif. - -%%---------------------------------------------------------------------------- - -start() -> - RpcTimeout = - case init:get_argument(maxwait) of - {ok,[[N1]]} -> 1000 * list_to_integer(N1); - _ -> ?MAX_WAIT - end, - case init:get_plain_arguments() of - [] -> - usage(); - FullCommand -> - {Command, Args} = parse_args(FullCommand), - case catch action(Command, Args, RpcTimeout) of - ok -> - io:format("done.~n"), - halt(); - {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> - print_error("invalid command '~s'", - [string:join(FullCommand, " ")]), - usage(); - timeout -> - print_error("timeout starting some nodes.", []), - halt(1); - Other -> - print_error("~p", [Other]), - halt(2) - end - end. - -print_error(Format, Args) -> - rabbit_misc:format_stderr("Error: " ++ Format ++ "~n", Args). - -parse_args([Command | Args]) -> - {list_to_atom(Command), Args}. - -stop() -> - ok. - -usage() -> - io:format("~s", [rabbit_multi_usage:usage()]), - halt(1). 
- -action(start_all, [NodeCount], RpcTimeout) -> - io:format("Starting all nodes...~n", []), - application:load(rabbit), - {_NodeNamePrefix, NodeHost} = NodeName = rabbit_misc:nodeparts( - getenv("RABBITMQ_NODENAME")), - case net_adm:names(NodeHost) of - {error, EpmdReason} -> - throw({cannot_connect_to_epmd, NodeHost, EpmdReason}); - {ok, _} -> - ok - end, - {NodePids, Running} = - case list_to_integer(NodeCount) of - 1 -> {NodePid, Started} = start_node(rabbit_misc:makenode(NodeName), - RpcTimeout), - {[NodePid], Started}; - N -> start_nodes(N, N, [], true, NodeName, - get_node_tcp_listener(), RpcTimeout) - end, - write_pids_file(NodePids), - case Running of - true -> ok; - false -> timeout - end; - -action(status, [], RpcTimeout) -> - io:format("Status of all running nodes...~n", []), - call_all_nodes( - fun ({Node, Pid}) -> - RabbitRunning = - case is_rabbit_running(Node, RpcTimeout) of - false -> not_running; - true -> running - end, - io:format("Node '~p' with Pid ~p: ~p~n", - [Node, Pid, RabbitRunning]) - end); - -action(stop_all, [], RpcTimeout) -> - io:format("Stopping all nodes...~n", []), - call_all_nodes(fun ({Node, Pid}) -> - io:format("Stopping node ~p~n", [Node]), - rpc:call(Node, rabbit, stop_and_halt, []), - case kill_wait(Pid, RpcTimeout, false) of - false -> kill_wait(Pid, RpcTimeout, true); - true -> ok - end, - io:format("OK~n", []) - end), - delete_pids_file(); - -action(rotate_logs, [], RpcTimeout) -> - action(rotate_logs, [""], RpcTimeout); - -action(rotate_logs, [Suffix], RpcTimeout) -> - io:format("Rotating logs for all nodes...~n", []), - BinarySuffix = list_to_binary(Suffix), - call_all_nodes( - fun ({Node, _}) -> - io:format("Rotating logs for node ~p", [Node]), - case rpc:call(Node, rabbit, rotate_logs, - [BinarySuffix], RpcTimeout) of - {badrpc, Error} -> io:format(": ~p.~n", [Error]); - ok -> io:format(": ok.~n", []) - end - end). - -%% PNodePid is the list of PIDs -%% Running is a boolean exhibiting success at some moment -start_nodes(0, _, PNodePid, Running, _, _, _) -> {PNodePid, Running}; - -start_nodes(N, Total, PNodePid, Running, NodeNameBase, Listener, RpcTimeout) -> - {NodePre, NodeSuff} = NodeNameBase, - NodeNumber = Total - N, - NodePre1 = case NodeNumber of - %% For compatibility with running a single node - 0 -> NodePre; - _ -> NodePre ++ "_" ++ integer_to_list(NodeNumber) - end, - Node = rabbit_misc:makenode({NodePre1, NodeSuff}), - os:putenv("RABBITMQ_NODENAME", atom_to_list(Node)), - case Listener of - {NodeIpAddress, NodePortBase} -> - NodePort = NodePortBase + NodeNumber, - os:putenv("RABBITMQ_NODE_PORT", integer_to_list(NodePort)), - os:putenv("RABBITMQ_NODE_IP_ADDRESS", NodeIpAddress); - undefined -> - ok - end, - {NodePid, Started} = start_node(Node, RpcTimeout), - start_nodes(N - 1, Total, [NodePid | PNodePid], - Started and Running, NodeNameBase, Listener, RpcTimeout). - -start_node(Node, RpcTimeout) -> - io:format("Starting node ~s...~n", [Node]), - case rpc:call(Node, os, getpid, []) of - {badrpc, _} -> - Port = run_rabbitmq_server(), - Started = wait_for_rabbit_to_start(Node, RpcTimeout, Port), - Pid = case rpc:call(Node, os, getpid, []) of - {badrpc, _} -> throw(cannot_get_pid); - PidS -> list_to_integer(PidS) - end, - io:format("~s~n", [case Started of - true -> "OK"; - false -> "timeout" - end]), - {{Node, Pid}, Started}; - PidS -> - Pid = list_to_integer(PidS), - throw({node_already_running, Node, Pid}) - end. 
- -wait_for_rabbit_to_start(_ , RpcTimeout, _) when RpcTimeout < 0 -> - false; -wait_for_rabbit_to_start(Node, RpcTimeout, Port) -> - case is_rabbit_running(Node, RpcTimeout) of - true -> true; - false -> receive - {'EXIT', Port, PosixCode} -> - throw({node_start_failed, PosixCode}) - after ?RPC_SLEEP -> - wait_for_rabbit_to_start( - Node, RpcTimeout - ?RPC_SLEEP, Port) - end - end. - -run_rabbitmq_server() -> - with_os([{unix, fun run_rabbitmq_server_unix/0}, - {win32, fun run_rabbitmq_server_win32/0}]). - -run_rabbitmq_server_unix() -> - CmdLine = getenv("RABBITMQ_SCRIPT_HOME") ++ "/rabbitmq-server -noinput", - erlang:open_port({spawn, CmdLine}, [nouse_stdio]). - -run_rabbitmq_server_win32() -> - Cmd = filename:nativename(os:find_executable("cmd")), - CmdLine = "\"" ++ getenv("RABBITMQ_SCRIPT_HOME") ++ - "\\rabbitmq-server.bat\" -noinput -detached", - erlang:open_port({spawn_executable, Cmd}, - [{arg0, Cmd}, {args, ["/q", "/s", "/c", CmdLine]}, - nouse_stdio]). - -is_rabbit_running(Node, RpcTimeout) -> - case rpc:call(Node, rabbit, status, [], RpcTimeout) of - {badrpc, _} -> false; - Status -> case proplists:get_value(running_applications, Status) of - undefined -> false; - Apps -> lists:keymember(rabbit, 1, Apps) - end - end. - -with_os(Handlers) -> - {OsFamily, _} = os:type(), - case proplists:get_value(OsFamily, Handlers) of - undefined -> throw({unsupported_os, OsFamily}); - Handler -> Handler() - end. - -pids_file() -> getenv("RABBITMQ_PIDS_FILE"). - -write_pids_file(Pids) -> - FileName = pids_file(), - Handle = case file:open(FileName, [write]) of - {ok, Device} -> - Device; - {error, Reason} -> - throw({cannot_create_pids_file, FileName, Reason}) - end, - try - ok = io:write(Handle, Pids), - ok = io:put_chars(Handle, [$.]) - after - case file:close(Handle) of - ok -> ok; - {error, Reason1} -> - throw({cannot_create_pids_file, FileName, Reason1}) - end - end, - ok. - -delete_pids_file() -> - FileName = pids_file(), - case file:delete(FileName) of - ok -> ok; - {error, enoent} -> ok; - {error, Reason} -> throw({cannot_delete_pids_file, FileName, Reason}) - end. - -read_pids_file() -> - FileName = pids_file(), - case file:consult(FileName) of - {ok, [Pids]} -> Pids; - {error, enoent} -> []; - {error, Reason} -> throw({cannot_read_pids_file, FileName, Reason}) - end. - -kill_wait(Pid, TimeLeft, Forceful) when TimeLeft < 0 -> - Cmd = with_os([{unix, fun () -> if Forceful -> "kill -9"; - true -> "kill" - end - end}, - %% Kill forcefully always on Windows, since erl.exe - %% seems to completely ignore non-forceful killing - %% even when everything is working - {win32, fun () -> "taskkill /f /pid" end}]), - os:cmd(Cmd ++ " " ++ integer_to_list(Pid)), - false; % Don't assume what we did just worked! - -% Returns true if the process is dead, false otherwise. -kill_wait(Pid, TimeLeft, Forceful) -> - timer:sleep(?RPC_SLEEP), - io:format(".", []), - is_dead(Pid) orelse kill_wait(Pid, TimeLeft - ?RPC_SLEEP, Forceful). - -% Test using some OS clunkiness since we shouldn't trust -% rpc:call(os, getpid, []) at this point -is_dead(Pid) -> - PidS = integer_to_list(Pid), - with_os([{unix, fun () -> - system("kill -0 " ++ PidS - ++ " >/dev/null 2>&1") /= 0 - end}, - {win32, fun () -> - Res = os:cmd("tasklist /nh /fi \"pid eq " ++ - PidS ++ "\" 2>&1"), - case re:run(Res, "erl\\.exe", [{capture, none}]) of - match -> false; - _ -> true - end - end}]). 
- -% Like system(3) -system(Cmd) -> - ShCmd = "sh -c '" ++ escape_quotes(Cmd) ++ "'", - Port = erlang:open_port({spawn, ShCmd}, [exit_status,nouse_stdio]), - receive {Port, {exit_status, Status}} -> Status end. - -% Escape the quotes in a shell command so that it can be used in "sh -c 'cmd'" -escape_quotes(Cmd) -> - lists:flatten(lists:map(fun ($') -> "'\\''"; (Ch) -> Ch end, Cmd)). - -call_all_nodes(Func) -> - case read_pids_file() of - [] -> throw(no_nodes_running); - NodePids -> lists:foreach(Func, NodePids) - end. - -getenv(Var) -> - case os:getenv(Var) of - false -> throw({missing_env_var, Var}); - Value -> Value - end. - -get_node_tcp_listener() -> - try - {getenv("RABBITMQ_NODE_IP_ADDRESS"), - list_to_integer(getenv("RABBITMQ_NODE_PORT"))} - catch _ -> - case application:get_env(rabbit, tcp_listeners) of - {ok, [{_IpAddy, _Port} = Listener]} -> - Listener; - {ok, []} -> - undefined; - {ok, Other} -> - throw({cannot_start_multiple_nodes, multiple_tcp_listeners, - Other}); - undefined -> - throw({missing_configuration, tcp_listeners}) - end - end. -- cgit v1.2.1 From 5e8a301e7584c6381ccac7e19d9fa9450f3533a8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 18 Jan 2011 15:39:10 +0000 Subject: That's not needed --- packaging/macports/Portfile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 96c82670..0f252424 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -83,7 +83,7 @@ post-destroot { reinplace -E "s:(/etc/rabbitmq/rabbitmq.conf):${prefix}\\1:g" \ ${realsbin}/rabbitmq-env - foreach var {CONFIG_FILE LOG_BASE MNESIA_BASE PIDS_FILE} { + foreach var {CONFIG_FILE LOG_BASE MNESIA_BASE} { reinplace -E "s:^($var)=/:\\1=${prefix}/:" \ ${realsbin}/rabbitmq-server \ ${realsbin}/rabbitmqctl -- cgit v1.2.1 From b88381ac6a1ea6badf70f0eec7384f9beb7a09bf Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 19 Jan 2011 12:49:30 +0000 Subject: Sender-specified distribution for fanout exchanges --- codegen.py | 2 +- include/rabbit.hrl | 3 +++ src/rabbit_exchange.erl | 26 +++++++++++++++++++++++++- src/rabbit_exchange_type_direct.erl | 11 +++-------- src/rabbit_exchange_type_fanout.erl | 10 ++++++++-- src/rabbit_misc.erl | 11 +---------- src/rabbit_router.erl | 5 +++-- 7 files changed, 44 insertions(+), 24 deletions(-) diff --git a/codegen.py b/codegen.py index 979c5bd8..6e9139b8 100644 --- a/codegen.py +++ b/codegen.py @@ -354,7 +354,7 @@ def genErl(spec): -type(amqp_field_type() :: 'longstr' | 'signedint' | 'decimal' | 'timestamp' | 'table' | 'byte' | 'double' | 'float' | 'long' | - 'short' | 'bool' | 'binary' | 'void'). + 'short' | 'bool' | 'binary' | 'void' | 'array'). -type(amqp_property_type() :: 'shortstr' | 'longstr' | 'octet' | 'shortint' | 'longint' | 'longlongint' | 'timestamp' | 'bit' | 'table'). diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 81c3996b..5c5fad76 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -96,6 +96,9 @@ -define(DESIRED_HIBERNATE, 10000). -define(STATS_INTERVAL, 5000). +-define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]). +-define(DELETED_HEADER, <<"BCC">>). + -ifdef(debug). -define(LOGDEBUG0(F), rabbit_log:debug(F)). -define(LOGDEBUG(F,A), rabbit_log:debug(F,A)). 
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a95cf0b1..d9e3431d 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -35,6 +35,7 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). +-export([header_routes/2]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -86,7 +87,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). - +-spec(header_routes/2 :: (rabbit_framing:amqp_table(), rabbit_types:vhost()) -> + [rabbit_types:r('queue')]). -endif. %%---------------------------------------------------------------------------- @@ -319,3 +321,25 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. + +header_routes(undefined, _VHost) -> + []; +header_routes(Headers, VHost) -> + [rabbit_misc:r(VHost, queue, RKey) || + RKey <- lists:flatten([routing_keys(Headers, Header) || + Header <- ?ROUTING_HEADERS])]. + +routing_keys(HeadersTable, Key) -> + case rabbit_misc:table_lookup(HeadersTable, Key) of + {longstr, Route} -> [Route]; + {array, Routes} -> rkeys(Routes, []); + _ -> [] + end. + +rkeys([{longstr, BinVal} | Rest], RKeys) -> + rkeys(Rest, [BinVal | RKeys]); +rkeys([{_, _} | Rest], RKeys) -> + rkeys(Rest, RKeys); +rkeys(_, RKeys) -> + RKeys. + diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index ab688853..9547117c 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -55,14 +55,9 @@ route(#exchange{name = #resource{virtual_host = VHost} = Name}, #delivery{message = #basic_message{routing_key = RoutingKey, content = Content}}) -> BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), - HeaderRKeys = - case (Content#content.properties)#'P_basic'.headers of - undefined -> []; - Headers -> rabbit_misc:table_lookup(Headers, <<"CC">>, <<0>>) ++ - rabbit_misc:table_lookup(Headers, <<"BCC">>, <<0>>) - end, - HeaderRoutes = [rabbit_misc:r(VHost, queue, RKey) || RKey <- HeaderRKeys], - lists:usort(BindingRoutes ++ HeaderRoutes). + HeaderRoutes = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers, VHost), + BindingRoutes ++ HeaderRoutes. validate(_X) -> ok. create(_X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index e7f75464..e9faf0a2 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -31,6 +31,7 @@ -module(rabbit_exchange_type_fanout). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -50,8 +51,13 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. -route(#exchange{name = Name}, _Delivery) -> - rabbit_router:match_routing_key(Name, '_'). +route(#exchange{name = #resource{virtual_host = VHost} = Name}, + #delivery{message = #basic_message{content = Content}}) -> + BindingRoutes = rabbit_router:match_routing_key(Name, '_'), + HeaderRoutes = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers, VHost), + BindingRoutes ++ HeaderRoutes. + validate(_X) -> ok. create(_X) -> ok. 
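(Commentary, not part of the patch: a quick illustration of header_routes/2 above, with made-up queue names. Given a headers table

    Headers = [{<<"CC">>,  array,   [{longstr, <<"q1">>}, {longstr, <<"q2">>}]},
               {<<"BCC">>, longstr, <<"q3">>}],

rabbit_exchange:header_routes(Headers, <<"/">>) returns a rabbit_misc:r(<<"/">>, queue, Name) resource for each of <<"q1">>, <<"q2">> and <<"q3">>, which the direct and fanout route/2 clauses simply append to the routes found via bindings; an absent header, or one that is neither a longstr nor an array of longstrs, contributes no routes.)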
diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 604346ed..15ba787a 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,7 +40,7 @@ protocol_error/3, protocol_error/4, protocol_error/1]). -export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). --export([table_lookup/3, table_lookup/2]). +-export([table_lookup/2]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). @@ -112,8 +112,6 @@ 'ok' | rabbit_types:connection_exit()). -spec(dirty_read/1 :: ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). --spec(table_lookup/3 :: - (rabbit_framing:amqp_table(), binary(), binary()) -> [binary()]). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). @@ -255,13 +253,6 @@ dirty_read(ReadSpec) -> [] -> {error, not_found} end. -table_lookup(Table, Key, Separator) -> - case table_lookup(Table, Key) of - undefined -> []; - {longstr, BinVal} -> binary:split(BinVal, Separator, [global]); - _ -> [] - end. - table_lookup(Table, Key) -> case lists:keysearch(Key, 1, Table) of {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin}; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 2f556df7..7f9b823e 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -69,7 +69,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, <<"BCC">>), + ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), delegate:invoke_no_result( QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {routed, QPids}; @@ -77,7 +77,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, <<"BCC">>), + ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), {Success, _} = delegate:invoke(QPids, fun (Pid) -> @@ -87,6 +87,7 @@ deliver(QNames, Delivery = #delivery{mandatory = Mandatory, lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). +%% This breaks the spec rule forbidding message modification strip_header(Delivery = #delivery{message = Message = #basic_message{ content = Content = #content{ properties = Props = #'P_basic'{headers = Headers}}}}, -- cgit v1.2.1 From 69f35fc60d84b1ffe4424dfe7d47f909bec8e423 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 19 Jan 2011 14:38:43 +0000 Subject: replace the sort with a gb_tree Instead of creating a list and sorting it, insert the MsgSeqNos into a gb_tree. Dicts and orddicts are slower. 
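(Commentary, not part of the patch: the grouping idiom adopted below, sketched as a stand-alone function with a made-up module name. Pairs are accumulated per key in a gb_tree so that each key is visited exactly once, rather than building a flat list, sorting it and splitting it afterwards.

    -module(group_by_key).   %% hypothetical, for illustration only
    -export([group/1]).

    %% group([{a,1},{b,2},{a,3}]) -> [{a,[3,1]},{b,[2]}]
    group(Pairs) ->
        Tree = lists:foldl(
                 fun ({Key, Value}, T) ->
                         case gb_trees:lookup(Key, T) of
                             {value, Vs} -> gb_trees:update(Key, [Value | Vs], T);
                             none        -> gb_trees:insert(Key, [Value], T)
                         end
                 end, gb_trees:empty(), Pairs),
        gb_trees:to_list(Tree).

In the diff that follows, the keys are channel pids, the values are MsgSeqNos, and the tree is walked with gb_trees:map/2 to issue one rabbit_channel:confirm/2 call per channel.)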
--- src/rabbit_amqqueue_process.erl | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 38b83117..b0aea012 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -431,27 +431,22 @@ confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> fun(Guid, {CMs, GTC0}) -> case dict:find(Guid, GTC0) of {ok, {ChPid, MsgSeqNo}} -> - {[{ChPid, MsgSeqNo} | CMs], dict:erase(Guid, GTC0)}; + {gb_trees_cons(ChPid, MsgSeqNo, CMs), + dict:erase(Guid, GTC0)}; _ -> {CMs, GTC0} end - end, {[], GTC}, Guids), - case lists:usort(CMs) of - [{Ch, MsgSeqNo} | CMs1] -> - [rabbit_channel:confirm(ChPid, MsgSeqNos) || - {ChPid, MsgSeqNos} <- group_confirms_by_channel( - CMs1, [{Ch, [MsgSeqNo]}])]; - [] -> - ok - end, + end, {gb_trees:empty(), GTC}, Guids), + gb_trees:map(fun(ChPid, MsgSeqNos) -> + rabbit_channel:confirm(ChPid, MsgSeqNos) + end, CMs), State#q{guid_to_channel = GTC1}. -group_confirms_by_channel([], Acc) -> - Acc; -group_confirms_by_channel([{Ch, Msg1} | CMs], [{Ch, Msgs} | Acc]) -> - group_confirms_by_channel(CMs, [{Ch, [Msg1 | Msgs]} | Acc]); -group_confirms_by_channel([{Ch, Msg1} | CMs], Acc) -> - group_confirms_by_channel(CMs, [{Ch, [Msg1]} | Acc]). +gb_trees_cons(Key, Value, Tree) -> + case gb_trees:lookup(Key, Tree) of + {value, Values} -> gb_trees:update(Key, [Value | Values], Tree); + none -> gb_trees:insert(Key, [Value], Tree) + end. record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> {no_confirm, State}; -- cgit v1.2.1 From 0b35d977d92af97c5c0d36ef890f2a4ac9a48881 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 21 Jan 2011 12:06:49 +0000 Subject: Adding gm related files, plucked from branch bug23554 --- include/gm_specs.hrl | 28 ++ src/gm.erl | 1308 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/gm_test.erl | 126 +++++ 3 files changed, 1462 insertions(+) create mode 100644 include/gm_specs.hrl create mode 100644 src/gm.erl create mode 100644 src/gm_test.erl diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl new file mode 100644 index 00000000..7f607755 --- /dev/null +++ b/include/gm_specs.hrl @@ -0,0 +1,28 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-type(callback_result() :: 'ok' | {'stop', any()}). +-type(args() :: [any()]). +-type(members() :: [pid()]). + +-spec(joined/2 :: (args(), members()) -> callback_result()). +-spec(members_changed/3 :: (args(), members(), members()) -> callback_result()). +-spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()). +-spec(terminate/2 :: (args(), term()) -> any()). + +-endif. 
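(Commentary, not part of the patch: a minimal callback module against the specs above; the module name, group name and printed output are made up. gm.erl, added below, supplies create_tables/0, start_link/3 and broadcast/2.

    -module(gm_echo).   %% hypothetical, for illustration only
    -export([joined/2, members_changed/3, handle_msg/3, terminate/2]).

    joined(Args, Members) ->
        io:format("~p joined; members: ~p~n", [Args, Members]), ok.
    members_changed(Args, Births, Deaths) ->
        io:format("~p births: ~p deaths: ~p~n", [Args, Births, Deaths]), ok.
    handle_msg(Args, From, Msg) ->
        io:format("~p received ~p from ~p~n", [Args, Msg, From]), ok.
    terminate(Args, Reason) ->
        io:format("~p terminating: ~p~n", [Args, Reason]), ok.

Usage, with mnesia already running:

    ok = gm:create_tables(),
    {ok, Pid} = gm:start_link(my_group, gm_echo, [node()]),
    ok = gm:broadcast(Pid, hello).

Note that confirmed_broadcast/2 must not be called from inside these callbacks, as warned in the module comments below.)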
diff --git a/src/gm.erl b/src/gm.erl new file mode 100644 index 00000000..baf46471 --- /dev/null +++ b/src/gm.erl @@ -0,0 +1,1308 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm). + +%% Guaranteed Multicast +%% ==================== +%% +%% This module provides the ability to create named groups of +%% processes to which members can be dynamically added and removed, +%% and for messages to be broadcast within the group that are +%% guaranteed to reach all members of the group during the lifetime of +%% the message. The lifetime of a message is defined as being, at a +%% minimum, the time from which the message is first sent to any +%% member of the group, up until the time at which it is known by the +%% member who published the message that the message has reached all +%% group members. +%% +%% The guarantee given is that provided a message, once sent, makes it +%% to members who do not all leave the group, the message will +%% continue to propagate to all group members. +%% +%% Another way of stating the guarantee is that if member P publishes +%% messages m and m', then for all members P', if P' is a member of +%% the group prior to the publication of m, and P' receives m', then +%% P' will receive m. +%% +%% Note that only local-ordering is enforced: i.e. if member P sends +%% message m and then message m', then for-all members P', if P' +%% receives m and m', then they will receive m' after m. Causality +%% ordering is _not_ enforced. I.e. if member P receives message m +%% and as a result publishes message m', there is no guarantee that +%% other members P' will receive m before m'. +%% +%% +%% API Use +%% ------- +%% +%% Mnesia must be started. Use the idempotent create_tables/0 function +%% to create the tables required. +%% +%% start_link/3 +%% Provide the group name, the callback module name, and a list of any +%% arguments you wish to be passed into the callback module's +%% functions. The joined/1 will be called when we have joined the +%% group, and the list of arguments will have appended to it a list of +%% the current members of the group. See the comments in +%% behaviour_info/1 below for further details of the callback +%% functions. +%% +%% leave/1 +%% Provide the Pid. Removes the Pid from the group. The callback +%% terminate/1 function will be called. +%% +%% broadcast/2 +%% Provide the Pid and a Message. The message will be sent to all +%% members of the group as per the guarantees given above. This is a +%% cast and the function call will return immediately. There is no +%% guarantee that the message will reach any member of the group. +%% +%% confirmed_broadcast/2 +%% Provide the Pid and a Message. As per broadcast/2 except that this +%% is a call, not a cast, and only returns 'ok' once the Message has +%% reached every member of the group. 
Do not call +%% confirmed_broadcast/2 directly from the callback module otherwise +%% you will deadlock the entire group. +%% +%% group_members/1 +%% Provide the Pid. Returns a list of the current group members. +%% +%% +%% Implementation Overview +%% ----------------------- +%% +%% One possible means of implementation would be a fan-out from the +%% sender to every member of the group. This would require that the +%% group is fully connected, and, in the event that the original +%% sender of the message disappears from the group before the message +%% has made it to every member of the group, raises questions as to +%% who is responsible for sending on the message to new group members. +%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] - +%% if the sender dies part way through, who is responsible for +%% ensuring that the remaining Members receive the Msg? In the event +%% that within the group, messages sent are broadcast from a subset of +%% the members, the fan-out arrangement has the potential to +%% substantially impact the CPU and network workload of such members, +%% as such members would have to accommodate the cost of sending each +%% message to every group member. +%% +%% Instead, if the members of the group are arranged in a chain, then +%% it becomes easier to reason about who within the group has received +%% each message and who has not. It eases issues of responsibility: in +%% the event of a group member disappearing, the nearest upstream +%% member of the chain is responsible for ensuring that messages +%% continue to propagate down the chain. It also results in equal +%% distribution of sending and receiving workload, even if all +%% messages are being sent from just a single group member. This +%% configuration has the further advantage that it is not necessary +%% for every group member to know of every other group member, and +%% even that a group member does not have to be accessible from all +%% other group members. +%% +%% Performance is kept high by permitting pipelining and all +%% communication between joined group members is asynchronous. In the +%% chain A -> B -> C -> D, if A sends a message to the group, it will +%% not directly contact C or D. However, it must know that D receives +%% the message (in addition to B and C) before it can consider the +%% message fully sent. A simplistic implementation would require that +%% D replies to C, C replies to B and B then replies to A. This would +%% result in a propagation delay of twice the length of the chain. It +%% would also require, in the event of the failure of C, that D knows +%% to directly contact B and issue the necessary replies. Instead, the +%% chain forms a ring: D sends the message on to A: D does not +%% distinguish A as the sender, merely as the next member (downstream) +%% within the chain (which has now become a ring). When A receives +%% from D messages that A sent, it knows that all members have +%% received the message. However, the message is not dead yet: if C +%% died as B was sending to C, then B would need to detect the death +%% of C and forward the message on to D instead: thus every node has +%% to remember every message published until it is told that it can +%% forget about the message. This is essential not just for dealing +%% with failure of members, but also for the addition of new members. +%% +%% Thus once A receives the message back again, it then sends to B an +%% acknowledgement for the message, indicating that B can now forget +%% about the message. 
B does so, and forwards the ack to C. C forgets +%% the message, and forwards the ack to D, which forgets the message +%% and finally forwards the ack back to A. At this point, A takes no +%% further action: the message and its acknowledgement have made it to +%% every member of the group. The message is now dead, and any new +%% member joining the group at this point will not receive the +%% message. +%% +%% We therefore have two roles: +%% +%% 1. The sender, who upon receiving their own messages back, must +%% then send out acknowledgements, and upon receiving their own +%% acknowledgements back perform no further action. +%% +%% 2. The other group members who upon receiving messages and +%% acknowledgements must update their own internal state accordingly +%% (the sending member must also do this in order to be able to +%% accommodate failures), and forwards messages on to their downstream +%% neighbours. +%% +%% +%% Implementation: It gets trickier +%% -------------------------------- +%% +%% Chain A -> B -> C -> D +%% +%% A publishes a message which B receives. A now dies. B and D will +%% detect the death of A, and will link up, thus the chain is now B -> +%% C -> D. B forwards A's message on to C, who forwards it to D, who +%% forwards it to B. Thus B is now responsible for A's messages - both +%% publications and acknowledgements that were in flight at the point +%% at which A died. Even worse is that this is transitive: after B +%% forwards A's message to C, B dies as well. Now C is not only +%% responsible for B's in-flight messages, but is also responsible for +%% A's in-flight messages. +%% +%% Lemma 1: A member can only determine which dead members they have +%% inherited responsibility for if there is a total ordering on the +%% conflicting additions and subtractions of members from the group. +%% +%% Consider the simultaneous death of B and addition of B' that +%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or +%% C is responsible for in-flight messages from B. It is easy to +%% ensure that at least one of them thinks they have inherited B, but +%% if we do not ensure that exactly one of them inherits B, then we +%% could have B' converting publishes to acks, which then will crash C +%% as C does not believe it has issued acks for those messages. +%% +%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E +%% becoming A -> C' -> E. Who has inherited which of B, C and D? +%% +%% However, for non-conflicting membership changes, only a partial +%% ordering is required. For example, A -> B -> C becoming A -> A' -> +%% B. The addition of A', between A and B can have no conflicts with +%% the death of C: it is clear that A has inherited C's messages. +%% +%% For ease of implementation, we adopt the simple solution, of +%% imposing a total order on all membership changes. +%% +%% On the death of a member, it is ensured the dead member's +%% neighbours become aware of the death, and the upstream neighbour +%% now sends to its new downstream neighbour its state, including the +%% messages pending acknowledgement. The downstream neighbour can then +%% use this to calculate which publishes and acknowledgements it has +%% missed out on, due to the death of its old upstream. Thus the +%% downstream can catch up, and continues the propagation of messages +%% through the group. 
+%% +%% Lemma 2: When a member is joining, it must synchronously +%% communicate with its upstream member in order to receive its +%% starting state atomically with its addition to the group. +%% +%% New members must start with the same state as their nearest +%% upstream neighbour. This ensures that it is not surprised by +%% acknowledgements they are sent, and that should their downstream +%% neighbour die, they are able to send the correct state to their new +%% downstream neighbour to ensure it can catch up. Thus in the +%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' -> +%% C, A' must start with the state of A, so that it can send C the +%% correct state when B dies, allowing C to detect any missed +%% messages. +%% +%% If A' starts by adding itself to the group membership, A could then +%% die, without A' having received the necessary state from A. This +%% would leave A' responsible for in-flight messages from A, but +%% having the least knowledge of all, of those messages. Thus A' must +%% start by synchronously calling A, which then immediately sends A' +%% back its state. A then adds A' to the group. If A dies at this +%% point then A' will be able to see this (as A' will fail to appear +%% in the group membership), and thus A' will ignore the state it +%% receives from A, and will simply repeat the process, trying to now +%% join downstream from some other member. This ensures that should +%% the upstream die as soon as the new member has been joined, the new +%% member is guaranteed to receive the correct state, allowing it to +%% correctly process messages inherited due to the death of its +%% upstream neighbour. +%% +%% The canonical definition of the group membership is held by a +%% distributed database. Whilst this allows the total ordering of +%% changes to be achieved, it is nevertheless undesirable to have to +%% query this database for the current view, upon receiving each +%% message. Instead, we wish for members to be able to cache a view of +%% the group membership, which then requires a cache invalidation +%% mechanism. Each member maintains its own view of the group +%% membership. Thus when the group's membership changes, members may +%% need to become aware of such changes in order to be able to +%% accurately process messages they receive. Because of the +%% requirement of a total ordering of conflicting membership changes, +%% it is not possible to use the guaranteed broadcast mechanism to +%% communicate these changes: to achieve the necessary ordering, it +%% would be necessary for such messages to be published by exactly one +%% member, which can not be guaranteed given that such a member could +%% die. +%% +%% The total ordering we enforce on membership changes gives rise to a +%% view version number: every change to the membership creates a +%% different view, and the total ordering permits a simple +%% monotonically increasing view version number. +%% +%% Lemma 3: If a message is sent from a member that holds view version +%% N, it can be correctly processed by any member receiving the +%% message with a view version >= N. +%% +%% Initially, let us suppose that each view contains the ordering of +%% every member that was ever part of the group. Dead members are +%% marked as such. Thus we have a ring of members, some of which are +%% dead, and are thus inherited by the nearest alive downstream +%% member. +%% +%% In the chain A -> B -> C, all three members initially have view +%% version 1, which reflects reality. 
B publishes a message, which is +%% forward by C to A. B now dies, which A notices very quickly. Thus A +%% updates the view, creating version 2. It now forwards B's +%% publication, sending that message to its new downstream neighbour, +%% C. This happens before C is aware of the death of B. C must become +%% aware of the view change before it interprets the message its +%% received, otherwise it will fail to learn of the death of B, and +%% thus will not realise it has inherited B's messages (and will +%% likely crash). +%% +%% Thus very simply, we have that each subsequent view contains more +%% information than the preceding view. +%% +%% However, to avoid the views growing indefinitely, we need to be +%% able to delete members which have died _and_ for which no messages +%% are in-flight. This requires that upon inheriting a dead member, we +%% know the last publication sent by the dead member (this is easy: we +%% inherit a member because we are the nearest downstream member which +%% implies that we know at least as much than everyone else about the +%% publications of the dead member), and we know the earliest message +%% for which the acknowledgement is still in flight. +%% +%% In the chain A -> B -> C, when B dies, A will send to C its state +%% (as C is the new downstream from A), allowing C to calculate which +%% messages it has missed out on (described above). At this point, C +%% also inherits B's messages. If that state from A also includes the +%% last message published by B for which an acknowledgement has been +%% seen, then C knows exactly which further acknowledgements it must +%% receive (also including issuing acknowledgements for publications +%% still in-flight that it receives), after which it is known there +%% are no more messages in flight for B, thus all evidence that B was +%% ever part of the group can be safely removed from the canonical +%% group membership. +%% +%% Thus, for every message that a member sends, it includes with that +%% message its view version. When a member receives a message it will +%% update its view from the canonical copy, should its view be older +%% than the view version included in the message it has received. +%% +%% The state held by each member therefore includes the messages from +%% each publisher pending acknowledgement, the last publication seen +%% from that publisher, and the last acknowledgement from that +%% publisher. In the case of the member's own publications or +%% inherited members, this last acknowledgement seen state indicates +%% the last acknowledgement retired, rather than sent. +%% +%% +%% Proof sketch +%% ------------ +%% +%% We need to prove that with the provided operational semantics, we +%% can never reach a state that is not well formed from a well-formed +%% starting state. +%% +%% Operational semantics (small step): straight-forward message +%% sending, process monitoring, state updates. +%% +%% Well formed state: dead members inherited by exactly one non-dead +%% member; for every entry in anyone's pending-acks, either (the +%% publication of the message is in-flight downstream from the member +%% and upstream from the publisher) or (the acknowledgement of the +%% message is in-flight downstream from the publisher and upstream +%% from the member). +%% +%% Proof by induction on the applicable operational semantics. 
+%% +%% +%% Related work +%% ------------ +%% +%% The ring configuration and double traversal of messages around the +%% ring is similar (though developed independently) to the LCR +%% protocol by [Levy 2008]. However, LCR differs in several +%% ways. Firstly, by using vector clocks, it enforces a total order of +%% message delivery, which is unnecessary for our purposes. More +%% significantly, it is built on top of a "group communication system" +%% which performs the group management functions, taking +%% responsibility away from the protocol as to how to cope with safely +%% adding and removing members. When membership changes do occur, the +%% protocol stipulates that every member must perform communication +%% with every other member of the group, to ensure all outstanding +%% deliveries complete, before the entire group transitions to the new +%% view. This, in total, requires two sets of all-to-all synchronous +%% communications. +%% +%% This is not only rather inefficient, but also does not explain what +%% happens upon the failure of a member during this process. It does +%% though entirely avoid the need for inheritance of responsibility of +%% dead members that our protocol incorporates. +%% +%% In [Marandi et al 2010], a Paxos-based protocol is described. This +%% work explicitly focuses on the efficiency of communication. LCR +%% (and our protocol too) are more efficient, but at the cost of +%% higher latency. The Ring-Paxos protocol is itself built on top of +%% IP-multicast, which rules it out for many applications where +%% point-to-point communication is all that can be required. They also +%% have an excellent related work section which I really ought to +%% read... +%% +%% +%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008. +%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast +%% Protocol + + +-behaviour(gen_server2). + +-export([create_tables/0, start_link/3, leave/1, broadcast/2, + confirmed_broadcast/2, group_members/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3, prioritise_info/2]). + +-export([behaviour_info/1]). + +-export([table_definitions/0]). + +-define(GROUP_TABLE, gm_group). +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). +-define(SETS, ordsets). +-define(DICT, orddict). + +-record(state, + { self, + left, + right, + group_name, + module, + view, + pub_count, + members_state, + callback_args, + confirms + }). + +-record(gm_group, { name, version, members }). + +-record(view_member, { id, aliases, left, right }). + +-record(member, { pending_ack, last_pub, last_ack }). + +-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group}, + {attributes, record_info(fields, gm_group)}]}). +-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}). + +-define(TAG, '$gm'). + +-ifdef(use_specs). + +-export_type([group_name/0]). + +-type(group_name() :: any()). + +-spec(create_tables/0 :: () -> 'ok'). +-spec(start_link/3 :: (group_name(), atom(), [any()]) -> + {'ok', pid()} | {'error', any()}). +-spec(leave/1 :: (pid()) -> 'ok'). +-spec(broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(group_members/1 :: (pid()) -> [pid()]). + +-endif. + +behaviour_info(callbacks) -> + [ + %% Called when we've successfully joined the group. Supplied with + %% Args provided in start_link, plus current group members. 
+ {joined, 2}, + + %% Supplied with Args provided in start_link, the list of new + %% members and the list of members previously known to us that + %% have since died. Note that if a member joins and dies very + %% quickly, it's possible that we will never see that member + %% appear in either births or deaths. However we are guaranteed + %% that (1) we will see a member joining either in the births + %% here, or in the members passed to joined/1 before receiving + %% any messages from it; and (2) we will not see members die that + %% we have not seen born (or supplied in the members to + %% joined/1). + {members_changed, 3}, + + %% Supplied with Args provided in start_link, the sender, and the + %% message. This does get called for messages injected by this + %% member, however, in such cases, there is no special + %% significance of this call: it does not indicate that the + %% message has made it to any other members, let alone all other + %% members. + {handle_msg, 3}, + + %% Called on gm member termination as per rules in gen_server, + %% with the Args provided in start_link plus the termination + %% Reason. + {terminate, 2} + ]; +behaviour_info(_Other) -> + undefined. + +create_tables() -> + create_tables([?TABLE]). + +create_tables([]) -> + ok; +create_tables([{Table, Attributes} | Tables]) -> + case mnesia:create_table(Table, Attributes) of + {atomic, ok} -> create_tables(Tables); + {aborted, {already_exists, gm_group}} -> create_tables(Tables); + Err -> Err + end. + +table_definitions() -> + {Name, Attributes} = ?TABLE, + [{Name, [?TABLE_MATCH | Attributes]}]. + +start_link(GroupName, Module, Args) -> + gen_server2:start_link(?MODULE, [GroupName, Module, Args], []). + +leave(Server) -> + gen_server2:cast(Server, leave). + +broadcast(Server, Msg) -> + gen_server2:cast(Server, {broadcast, Msg}). + +confirmed_broadcast(Server, Msg) -> + gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity). + +group_members(Server) -> + gen_server2:call(Server, group_members, infinity). + + +init([GroupName, Module, Args]) -> + random:seed(now()), + gen_server2:cast(self(), join), + Self = self(), + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new() }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
+ + +handle_call({confirmed_broadcast, _Msg}, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call({confirmed_broadcast, Msg}, _From, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), ok, State}); + +handle_call({confirmed_broadcast, Msg}, From, State) -> + internal_broadcast(Msg, From, State); + +handle_call(group_members, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call(group_members, _From, State = #state { view = View }) -> + reply(alive_view_members(View), State); + +handle_call({add_on_right, _NewMember}, _From, + State = #state { members_state = undefined }) -> + reply(not_ready, State); + +handle_call({add_on_right, NewMember}, _From, + State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + Group = record_new_member_in_group( + GroupName, Self, NewMember, + fun (Group1) -> + View1 = group_to_view(Group1), + ok = send_right(NewMember, View1, + {catchup, Self, prepare_members_state( + MembersState)}) + end), + View2 = group_to_view(Group), + State1 = check_neighbours(State #state { view = View2 }), + Result = callback_view_changed(Args, Module, View, View2), + handle_callback_result({Result, {ok, Group}, State1}). + + +handle_cast({?TAG, ReqVer, Msg}, + State = #state { view = View, + group_name = GroupName, + module = Module, + callback_args = Args }) -> + {Result, State1} = + case needs_view_update(ReqVer, View) of + true -> + View1 = group_to_view(read_group(GroupName)), + {callback_view_changed(Args, Module, View, View1), + check_neighbours(State #state { view = View1 })}; + false -> + {ok, State} + end, + handle_callback_result( + if_callback_success( + Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)); + +handle_cast({broadcast, _Msg}, State = #state { members_state = undefined }) -> + noreply(State); + +handle_cast({broadcast, Msg}, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), State}); + +handle_cast({broadcast, Msg}, State) -> + internal_broadcast(Msg, none, State); + +handle_cast(join, State = #state { self = Self, + group_name = GroupName, + members_state = undefined, + module = Module, + callback_args = Args }) -> + View = join_group(Self, GroupName), + MembersState = + case alive_view_members(View) of + [Self] -> blank_member_state(); + _ -> undefined + end, + State1 = check_neighbours(State #state { view = View, + members_state = MembersState }), + handle_callback_result( + {Module:joined(Args, all_known_members(View)), State1}); + +handle_cast(leave, State) -> + {stop, normal, State}. 
+ + +handle_info({'DOWN', MRef, process, _Pid, _Reason}, + State = #state { self = Self, + left = Left, + right = Right, + group_name = GroupName, + view = View, + module = Module, + callback_args = Args, + confirms = Confirms }) -> + Member = case {Left, Right} of + {{Member1, MRef}, _} -> Member1; + {_, {Member1, MRef}} -> Member1; + _ -> undefined + end, + case Member of + undefined -> + noreply(State); + _ -> + View1 = + group_to_view(record_dead_member_in_group(Member, GroupName)), + State1 = State #state { view = View1 }, + {Result, State2} = + case alive_view_members(View1) of + [Self] -> + maybe_erase_aliases( + State1 #state { + members_state = blank_member_state(), + confirms = purge_confirms(Confirms) }); + _ -> + %% here we won't be pointing out any deaths: + %% the concern is that there maybe births + %% which we'd otherwise miss. + {callback_view_changed(Args, Module, View, View1), + State1} + end, + handle_callback_result({Result, check_neighbours(State2)}) + end. + + +terminate(Reason, #state { module = Module, + callback_args = Args }) -> + Module:terminate(Args, Reason). + + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + + +prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; +prioritise_info(_ , _State) -> 0. + + +handle_msg(check_neighbours, State) -> + %% no-op - it's already been done by the calling handle_cast + {ok, State}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + right = {Right, _MRefR}, + view = View, + members_state = undefined }) -> + ok = send_right(Right, View, {catchup, Self, MembersStateLeft}), + MembersStateLeft1 = build_members_state(MembersStateLeft), + {ok, State #state { members_state = MembersStateLeft1 }}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + view = View, + members_state = MembersState }) + when MembersState =/= undefined -> + MembersStateLeft1 = build_members_state(MembersStateLeft), + AllMembers = lists:usort(?DICT:fetch_keys(MembersState) ++ + ?DICT:fetch_keys(MembersStateLeft1)), + {MembersState1, Activity} = + lists:foldl( + fun (Id, MembersStateActivity) -> + #member { pending_ack = PALeft, last_ack = LA } = + find_member_or_blank(Id, MembersStateLeft1), + with_member_acc( + fun (#member { pending_ack = PA } = Member, Activity1) -> + case is_member_alias(Id, Self, View) of + true -> + {_AcksInFlight, Pubs, _PA1} = + find_prefix_common_suffix(PALeft, PA), + {Member #member { last_ack = LA }, + activity_cons(Id, pubs_from_queue(Pubs), + [], Activity1)}; + false -> + {Acks, _Common, Pubs} = + find_prefix_common_suffix(PA, PALeft), + {Member, + activity_cons(Id, pubs_from_queue(Pubs), + acks_from_queue(Acks), + Activity1)} + end + end, Id, MembersStateActivity) + end, {MembersState, activity_nil()}, AllMembers), + handle_msg({activity, Left, activity_finalise(Activity)}, + State #state { members_state = MembersState1 }); + +handle_msg({catchup, _NotLeft, _MembersState}, State) -> + {ok, State}; + +handle_msg({activity, Left, Activity}, + State = #state { self = Self, + left = {Left, _MRefL}, + view = View, + members_state = MembersState, + confirms = Confirms }) + when MembersState =/= undefined -> + {MembersState1, {Confirms1, Activity1}} = + lists:foldl( + fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> + with_member_acc( + fun (Member = #member { pending_ack = PA, + last_pub = LP, + last_ack = LA }, + {Confirms2, Activity2}) -> + case is_member_alias(Id, Self, View) of + true 
-> + {ToAck, PA1} = + find_common(queue_from_pubs(Pubs), PA, + queue:new()), + LA1 = last_ack(Acks, LA), + AckNums = acks_from_queue(ToAck), + Confirms3 = maybe_confirm( + Self, Id, Confirms2, AckNums), + {Member #member { pending_ack = PA1, + last_ack = LA1 }, + {Confirms3, + activity_cons( + Id, [], AckNums, Activity2)}}; + false -> + PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), + LA1 = last_ack(Acks, LA), + LP1 = last_pub(Pubs, LP), + {Member #member { pending_ack = PA1, + last_pub = LP1, + last_ack = LA1 }, + {Confirms2, + activity_cons(Id, Pubs, Acks, Activity2)}} + end + end, Id, MembersStateConfirmsActivity) + end, {MembersState, {Confirms, activity_nil()}}, Activity), + State1 = State #state { members_state = MembersState1, + confirms = Confirms1 }, + Activity3 = activity_finalise(Activity1), + {Result, State2} = maybe_erase_aliases(State1), + ok = maybe_send_activity(Activity3, State2), + if_callback_success( + Result, fun activity_true/3, fun activity_false/3, Activity3, State2); + +handle_msg({activity, _NotLeft, _Activity}, State) -> + {ok, State}. + + +noreply(State) -> + {noreply, State, hibernate}. + +reply(Reply, State) -> + {reply, Reply, State, hibernate}. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + members_state = MembersState, + module = Module, + confirms = Confirms, + callback_args = Args }) -> + PubMsg = {PubCount, Msg}, + Activity = activity_cons(Self, [PubMsg], [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = + with_member( + fun (Member = #member { pending_ack = PA }) -> + Member #member { pending_ack = queue:in(PubMsg, PA) } + end, Self, MembersState), + Confirms1 = case From of + none -> Confirms; + _ -> queue:in({PubCount, From}, Confirms) + end, + handle_callback_result({Module:handle_msg(Args, Self, Msg), + State #state { pub_count = PubCount + 1, + members_state = MembersState1, + confirms = Confirms1 }}). + + +%% --------------------------------------------------------------------------- +%% View construction and inspection +%% --------------------------------------------------------------------------- + +needs_view_update(ReqVer, {Ver, _View}) -> + Ver < ReqVer. + +view_version({Ver, _View}) -> + Ver. + +is_member_alive({dead, _Member}) -> false; +is_member_alive(_) -> true. + +is_member_alias(Self, Self, _View) -> + true; +is_member_alias(Member, Self, View) -> + ?SETS:is_element(Member, + ((fetch_view_member(Self, View)) #view_member.aliases)). + +dead_member_id({dead, Member}) -> Member. + +store_view_member(VMember = #view_member { id = Id }, {Ver, View}) -> + {Ver, ?DICT:store(Id, VMember, View)}. + +with_view_member(Fun, View, Id) -> + store_view_member(Fun(fetch_view_member(Id, View)), View). + +fetch_view_member(Id, {_Ver, View}) -> + ?DICT:fetch(Id, View). + +find_view_member(Id, {_Ver, View}) -> + ?DICT:find(Id, View). + +blank_view(Ver) -> + {Ver, ?DICT:new()}. + +alive_view_members({_Ver, View}) -> + ?DICT:fetch_keys(View). + +all_known_members({_Ver, View}) -> + ?DICT:fold( + fun (Member, #view_member { aliases = Aliases }, Acc) -> + ?SETS:to_list(Aliases) ++ [Member | Acc] + end, [], View). + +group_to_view(#gm_group { members = Members, version = Ver }) -> + Alive = lists:filter(fun is_member_alive/1, Members), + [_|_] = Alive, %% ASSERTION - can't have all dead members + add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members). 
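(Commentary, not part of the patch: a worked example of group_to_view/1 above. For

    #gm_group { version = 3, members = [A, {dead, B}, C] }

with A and C alive, the resulting view has version 3 and two live members pointing at each other:

    fetch_view_member(A, View) = #view_member { id = A, aliases = [],  left = C, right = C }
    fetch_view_member(C, View) = #view_member { id = C, aliases = [B], left = A, right = A }

i.e. C, the nearest live member downstream of B, inherits B as an alias and thereby takes responsibility for B's in-flight messages.)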
+ +link_view([Left, Middle, Right | Rest], View) -> + case find_view_member(Middle, View) of + error -> + link_view( + [Middle, Right | Rest], + store_view_member(#view_member { id = Middle, + aliases = ?SETS:new(), + left = Left, + right = Right }, View)); + {ok, _} -> + View + end; +link_view(_, View) -> + View. + +add_aliases(View, Members) -> + Members1 = ensure_alive_suffix(Members), + {EmptyDeadSet, View1} = + lists:foldl( + fun (Member, {DeadAcc, ViewAcc}) -> + case is_member_alive(Member) of + true -> + {?SETS:new(), + with_view_member( + fun (VMember = + #view_member { aliases = Aliases }) -> + VMember #view_member { + aliases = ?SETS:union(Aliases, DeadAcc) } + end, ViewAcc, Member)}; + false -> + {?SETS:add_element(dead_member_id(Member), DeadAcc), + ViewAcc} + end + end, {?SETS:new(), View}, Members1), + 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION + View1. + +ensure_alive_suffix(Members) -> + queue:to_list(ensure_alive_suffix1(queue:from_list(Members))). + +ensure_alive_suffix1(MembersQ) -> + {{value, Member}, MembersQ1} = queue:out_r(MembersQ), + case is_member_alive(Member) of + true -> MembersQ; + false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1)) + end. + + +%% --------------------------------------------------------------------------- +%% View modification +%% --------------------------------------------------------------------------- + +join_group(Self, GroupName) -> + join_group(Self, GroupName, read_group(GroupName)). + +join_group(Self, GroupName, {error, not_found}) -> + join_group(Self, GroupName, prune_or_create_group(Self, GroupName)); +join_group(Self, _GroupName, #gm_group { members = [Self] } = Group) -> + group_to_view(Group); +join_group(Self, GroupName, #gm_group { members = Members } = Group) -> + case lists:member(Self, Members) of + true -> + group_to_view(Group); + false -> + case lists:filter(fun is_member_alive/1, Members) of + [] -> + join_group(Self, GroupName, + prune_or_create_group(Self, GroupName)); + Alive -> + Left = lists:nth(random:uniform(length(Alive)), Alive), + try + case gen_server2:call( + Left, {add_on_right, Self}, infinity) of + {ok, Group1} -> group_to_view(Group1); + not_ready -> join_group(Self, GroupName) + end + catch + exit:{R, _} + when R =:= noproc; R =:= normal; R =:= shutdown -> + join_group( + Self, GroupName, + record_dead_member_in_group(Left, GroupName)) + end + end + end. + +read_group(GroupName) -> + case mnesia:dirty_read(?GROUP_TABLE, GroupName) of + [] -> {error, not_found}; + [Group] -> Group + end. + +prune_or_create_group(Self, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> GroupNew = #gm_group { name = GroupName, + members = [Self], + version = 0 }, + case mnesia:read(?GROUP_TABLE, GroupName) of + [] -> + mnesia:write(GroupNew), + GroupNew; + [Group1 = #gm_group { members = Members }] -> + case lists:any(fun is_member_alive/1, Members) of + true -> Group1; + false -> mnesia:write(GroupNew), + GroupNew + end + end + end), + Group. 
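%% Illustrative sketch (not part of the changeset; the function name is
%% hypothetical). Dead members are not removed from the stored group straight
%% away: they are kept as {dead, Id} markers (see record_dead_member_in_group/2
%% below) and filtered out by the helpers above, e.g.:
membership_markers_example() ->
    Members = [a, {dead, b}, c],
    [a, c]  = lists:filter(fun is_member_alive/1, Members),
    b       = dead_member_id({dead, b}),
    ok.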
+ +record_dead_member_in_group(Member, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case lists:splitwith( + fun (Member1) -> Member1 =/= Member end, Members) of + {_Members1, []} -> %% not found - already recorded dead + Group1; + {Members1, [Member | Members2]} -> + Members3 = Members1 ++ [{dead, Member} | Members2], + Group2 = Group1 #gm_group { members = Members3, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +record_new_member_in_group(GroupName, Left, NewMember, Fun) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [#gm_group { members = Members, version = Ver } = Group1] = + mnesia:read(?GROUP_TABLE, GroupName), + {Prefix, [Left | Suffix]} = + lists:splitwith(fun (M) -> M =/= Left end, Members), + Members1 = Prefix ++ [Left, NewMember | Suffix], + Group2 = Group1 #gm_group { members = Members1, + version = Ver + 1 }, + ok = Fun(Group2), + mnesia:write(Group2), + Group2 + end), + Group. + +erase_members_in_group(Members, GroupName) -> + DeadMembers = [{dead, Id} || Id <- Members], + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [Group1 = #gm_group { members = [_|_] = Members1, + version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case Members1 -- DeadMembers of + Members1 -> Group1; + Members2 -> Group2 = + Group1 #gm_group { members = Members2, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +maybe_erase_aliases(State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + #view_member { aliases = Aliases } = fetch_view_member(Self, View), + {Erasable, MembersState1} + = ?SETS:fold( + fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> + #member { last_pub = LP, last_ack = LA } = + find_member_or_blank(Id, MembersState), + case can_erase_view_member(Self, Id, LA, LP) of + true -> {[Id | ErasableAcc], + erase_member(Id, MembersStateAcc)}; + false -> Acc + end + end, {[], MembersState}, Aliases), + State1 = State #state { members_state = MembersState1 }, + case Erasable of + [] -> {ok, State1}; + _ -> View1 = group_to_view( + erase_members_in_group(Erasable, GroupName)), + {callback_view_changed(Args, Module, View, View1), + State1 #state { view = View1 }} + end. + +can_erase_view_member(Self, Self, _LA, _LP) -> false; +can_erase_view_member(_Self, _Id, N, N) -> true; +can_erase_view_member(_Self, _Id, _LA, _LP) -> false. + + +%% --------------------------------------------------------------------------- +%% View monitoring and maintanence +%% --------------------------------------------------------------------------- + +ensure_neighbour(_Ver, Self, {Self, undefined}, Self) -> + {Self, undefined}; +ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) -> + ok = gen_server2:cast(RealNeighbour, {?TAG, Ver, check_neighbours}), + {RealNeighbour, maybe_monitor(RealNeighbour, Self)}; +ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) -> + {RealNeighbour, MRef}; +ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) -> + true = erlang:demonitor(MRef), + Msg = {?TAG, Ver, check_neighbours}, + ok = gen_server2:cast(RealNeighbour, Msg), + ok = case Neighbour of + Self -> ok; + _ -> gen_server2:cast(Neighbour, Msg) + end, + {Neighbour, maybe_monitor(Neighbour, Self)}. 
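%% Descriptive note (annotation, not part of the changeset): ensure_neighbour/4
%% above reconciles a cached neighbour slot ({Pid, MonitorRef}) with the
%% neighbour required by the current view. An unchanged neighbour is kept
%% as-is; a changed one has its old monitor removed and the affected processes
%% are prompted, via check_neighbours casts, to re-examine their own
%% neighbours; a member that is its own neighbour (a group of one) carries no
%% monitor at all.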
+ +maybe_monitor(Self, Self) -> + undefined; +maybe_monitor(Other, _Self) -> + erlang:monitor(process, Other). + +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View }) -> + #view_member { left = VLeft, right = VRight } + = fetch_view_member(Self, View), + Ver = view_version(View), + Left1 = ensure_neighbour(Ver, Self, Left, VLeft), + Right1 = ensure_neighbour(Ver, Self, Right, VRight), + State1 = State #state { left = Left1, right = Right1 }, + ok = maybe_send_catchup(Right, State1), + State1. + +maybe_send_catchup(Right, #state { right = Right }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Self, undefined} }) -> + ok; +maybe_send_catchup(_Right, #state { members_state = undefined }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Right, _MRef}, + view = View, + members_state = MembersState }) -> + send_right(Right, View, + {catchup, Self, prepare_members_state(MembersState)}). + + +%% --------------------------------------------------------------------------- +%% Catch_up delta detection +%% --------------------------------------------------------------------------- + +find_prefix_common_suffix(A, B) -> + {Prefix, A1} = find_prefix(A, B, queue:new()), + {Common, Suffix} = find_common(A1, B, queue:new()), + {Prefix, Common, Suffix}. + +%% Returns the elements of A that occur before the first element of B, +%% plus the remainder of A. +find_prefix(A, B, Prefix) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, _A1}, {{value, Val}, _B1}} -> + {Prefix, A}; + {{empty, A1}, {{value, _A}, _B1}} -> + {Prefix, A1}; + {{{value, {NumA, _MsgA} = Val}, A1}, + {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB -> + find_prefix(A1, B, queue:in(Val, Prefix)); + {_, {empty, _B1}} -> + {A, Prefix} %% Prefix well be empty here + end. + +%% A should be a prefix of B. Returns the commonality plus the +%% remainder of B. +find_common(A, B, Common) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, A1}, {{value, Val}, B1}} -> + find_common(A1, B1, queue:in(Val, Common)); + {{empty, _A}, _} -> + {Common, B} + end. + + +%% --------------------------------------------------------------------------- +%% Members helpers +%% --------------------------------------------------------------------------- + +with_member(Fun, Id, MembersState) -> + store_member( + Id, Fun(find_member_or_blank(Id, MembersState)), MembersState). + +with_member_acc(Fun, Id, {MembersState, Acc}) -> + {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc), + {store_member(Id, MemberState, MembersState), Acc1}. + +find_member_or_blank(Id, MembersState) -> + case ?DICT:find(Id, MembersState) of + {ok, Result} -> Result; + error -> blank_member() + end. + +erase_member(Id, MembersState) -> + ?DICT:erase(Id, MembersState). + +blank_member() -> + #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }. + +blank_member_state() -> + ?DICT:new(). + +store_member(Id, MemberState, MembersState) -> + ?DICT:store(Id, MemberState, MembersState). + +prepare_members_state(MembersState) -> + ?DICT:to_list(MembersState). + +build_members_state(MembersStateList) -> + ?DICT:from_list(MembersStateList). + + +%% --------------------------------------------------------------------------- +%% Activity assembly +%% --------------------------------------------------------------------------- + +activity_nil() -> + queue:new(). 
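%% Illustrative sketch (not part of the changeset; the function name is
%% hypothetical). If pasted into gm.erl it shows what the catch-up delta
%% helpers above (find_prefix_common_suffix/2 and friends) compute for two
%% members' pending-ack queues:
catchup_delta_example() ->
    A = queue:from_list([{1, a}, {2, b}, {3, c}]),
    B = queue:from_list([{2, b}, {3, c}, {4, d}]),
    {Prefix, Common, Suffix} = find_prefix_common_suffix(A, B),
    [{1, a}]         = queue:to_list(Prefix),  %% only in A
    [{2, b}, {3, c}] = queue:to_list(Common),  %% shared
    [{4, d}]         = queue:to_list(Suffix),  %% only in B
    ok.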
+ +activity_cons(_Id, [], [], Tail) -> + Tail; +activity_cons(Sender, Pubs, Acks, Tail) -> + queue:in({Sender, Pubs, Acks}, Tail). + +activity_finalise(Activity) -> + queue:to_list(Activity). + +maybe_send_activity([], _State) -> + ok; +maybe_send_activity(Activity, #state { self = Self, + right = {Right, _MRefR}, + view = View }) -> + send_right(Right, View, {activity, Self, Activity}). + +send_right(Right, View, Msg) -> + ok = gen_server2:cast(Right, {?TAG, view_version(View), Msg}). + +callback(Args, Module, Activity) -> + lists:foldl( + fun ({Id, Pubs, _Acks}, ok) -> + lists:foldl(fun ({_PubNum, Pub}, ok) -> + Module:handle_msg(Args, Id, Pub); + (_, Error) -> + Error + end, ok, Pubs); + (_, Error) -> + Error + end, ok, Activity). + +callback_view_changed(Args, Module, OldView, NewView) -> + OldMembers = all_known_members(OldView), + NewMembers = all_known_members(NewView), + Births = NewMembers -- OldMembers, + Deaths = OldMembers -- NewMembers, + case {Births, Deaths} of + {[], []} -> ok; + _ -> Module:members_changed(Args, Births, Deaths) + end. + +handle_callback_result({Result, State}) -> + if_callback_success( + Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State); +handle_callback_result({Result, Reply, State}) -> + if_callback_success( + Result, fun reply_true/3, fun reply_false/3, Reply, State). + +no_reply_true (_Result, _Undefined, State) -> noreply(State). +no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}. + +reply_true (_Result, Reply, State) -> reply(Reply, State). +reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}. + +handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State). +handle_msg_false(Result, _Msg, State) -> {Result, State}. + +activity_true(_Result, Activity, State = #state { module = Module, + callback_args = Args }) -> + {callback(Args, Module, Activity), State}. +activity_false(Result, _Activity, State) -> + {Result, State}. + +if_callback_success(ok, True, _False, Arg, State) -> + True(ok, Arg, State); +if_callback_success( + {become, Module, Args} = Result, True, _False, Arg, State) -> + True(Result, Arg, State #state { module = Module, + callback_args = Args }); +if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) -> + False(Result, Arg, State). + +maybe_confirm(_Self, _Id, Confirms, []) -> + Confirms; +maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) -> + case queue:out(Confirms) of + {empty, _Confirms} -> + Confirms; + {{value, {PubNum, From}}, Confirms1} -> + gen_server2:reply(From, ok), + maybe_confirm(Self, Self, Confirms1, PubNums); + {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum -> + maybe_confirm(Self, Self, Confirms, PubNums) + end; +maybe_confirm(_Self, _Id, Confirms, _PubNums) -> + Confirms. + +purge_confirms(Confirms) -> + [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], + queue:new(). + + +%% --------------------------------------------------------------------------- +%% Msg transformation +%% --------------------------------------------------------------------------- + +acks_from_queue(Q) -> + [PubNum || {PubNum, _Msg} <- queue:to_list(Q)]. + +pubs_from_queue(Q) -> + queue:to_list(Q). + +queue_from_pubs(Pubs) -> + queue:from_list(Pubs). + +apply_acks([], Pubs) -> + Pubs; +apply_acks(List, Pubs) -> + {_, Pubs1} = queue:split(length(List), Pubs), + Pubs1. + +join_pubs(Q, []) -> Q; +join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)). 
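%% Illustrative sketch (not part of the changeset; the function name is
%% hypothetical). If pasted into gm.erl it exercises the queue helpers above:
%% new publications are appended to a member's pending-ack queue and acks drop
%% the corresponding prefix.
pending_ack_example() ->
    PA  = queue:from_list([{1, m1}, {2, m2}]),
    PA1 = join_pubs(PA, [{3, m3}]),
    [1, 2, 3] = acks_from_queue(PA1),
    PA2 = apply_acks([1, 2], PA1),
    [{3, m3}] = queue:to_list(PA2),
    ok.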
+ +last_ack([], LA) -> + LA; +last_ack(List, LA) -> + LA1 = lists:last(List), + true = LA1 > LA, %% ASSERTION + LA1. + +last_pub([], LP) -> + LP; +last_pub(List, LP) -> + {PubNum, _Msg} = lists:last(List), + true = PubNum > LP, %% ASSERTION + PubNum. diff --git a/src/gm_test.erl b/src/gm_test.erl new file mode 100644 index 00000000..e8f28598 --- /dev/null +++ b/src/gm_test.erl @@ -0,0 +1,126 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm_test). + +-export([test/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +get_state() -> + get(state). + +with_state(Fun) -> + put(state, Fun(get_state())). + +inc() -> + case 1 + get(count) of + 100000 -> Now = os:timestamp(), + Start = put(ts, Now), + Diff = timer:now_diff(Now, Start), + Rate = 100000 / (Diff / 1000000), + io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), + put(count, 0); + N -> put(count, N) + end. + +joined([], Members) -> + io:format("Joined ~p (~p members)~n", [self(), length(Members)]), + put(state, dict:from_list([{Member, empty} || Member <- Members])), + put(count, 0), + put(ts, os:timestamp()), + ok. + +members_changed([], Births, Deaths) -> + with_state( + fun (State) -> + State1 = + lists:foldl( + fun (Born, StateN) -> + false = dict:is_key(Born, StateN), + dict:store(Born, empty, StateN) + end, State, Births), + lists:foldl( + fun (Died, StateN) -> + true = dict:is_key(Died, StateN), + dict:store(Died, died, StateN) + end, State1, Deaths) + end), + ok. + +handle_msg([], From, {test_msg, Num}) -> + inc(), + with_state( + fun (State) -> + ok = case dict:find(From, State) of + {ok, died} -> + exit({{from, From}, + {received_posthumous_delivery, Num}}); + {ok, empty} -> ok; + {ok, Num} -> ok; + {ok, Num1} when Num < Num1 -> + exit({{from, From}, + {duplicate_delivery_of, Num1}, + {expecting, Num}}); + {ok, Num1} -> + exit({{from, From}, + {missing_delivery_of, Num}, + {received_early, Num1}}); + error -> + exit({{from, From}, + {received_premature_delivery, Num}}) + end, + dict:store(From, Num + 1, State) + end), + ok. + +terminate([], Reason) -> + io:format("Left ~p (~p)~n", [self(), Reason]), + ok. + +spawn_member() -> + spawn_link( + fun () -> + random:seed(now()), + %% start up delay of no more than 10 seconds + timer:sleep(random:uniform(10000)), + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), + Start = random:uniform(10000), + send_loop(Pid, Start, Start + random:uniform(10000)), + gm:leave(Pid), + spawn_more() + end). + +spawn_more() -> + [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. + +send_loop(_Pid, Target, Target) -> + ok; +send_loop(Pid, Count, Target) when Target > Count -> + case random:uniform(3) of + 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); + _ -> gm:broadcast(Pid, {test_msg, Count}) + end, + timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms + send_loop(Pid, Count + 1, Target). 
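%% Assumed invocation (annotation, not documented in the patch): the soak test
%% is driven from an Erlang shell on a node where mnesia has been set up and
%% started, e.g.
%%
%%   1> mnesia:start().
%%   2> gm_test:test().
%%
%% Each member then periodically logs its observed delivery rate (see inc/0
%% above) and the group keeps churning as members leave and replacements are
%% spawned by spawn_more/0.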
+ +test() -> + ok = gm:create_tables(), + spawn_member(), + spawn_member(). -- cgit v1.2.1 From a29958797d402243f9b36083f7d2f317eb9ed40f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 21 Jan 2011 12:10:41 +0000 Subject: bump year on copyrights --- include/gm_specs.hrl | 2 +- src/gm.erl | 2 +- src/gm_test.erl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 7f607755..987866db 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -ifdef(use_specs). diff --git a/src/gm.erl b/src/gm.erl index baf46471..8fea9196 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(gm). diff --git a/src/gm_test.erl b/src/gm_test.erl index e8f28598..e0a92a0c 100644 --- a/src/gm_test.erl +++ b/src/gm_test.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(gm_test). -- cgit v1.2.1 From 9a26f636211ca479a310bdfc7168c1ec554ffaae Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 24 Jan 2011 13:42:59 +0000 Subject: First pass at a multi-free init script. --- packaging/RPMS/Fedora/Makefile | 1 - packaging/common/rabbitmq-server.init | 68 ++++++++++++++++++++--------------- packaging/debs/Debian/Makefile | 1 - 3 files changed, 40 insertions(+), 30 deletions(-) diff --git a/packaging/RPMS/Fedora/Makefile b/packaging/RPMS/Fedora/Makefile index 74a1800a..287945fe 100644 --- a/packaging/RPMS/Fedora/Makefile +++ b/packaging/RPMS/Fedora/Makefile @@ -31,7 +31,6 @@ prepare: cp ${COMMON_DIR}/* SOURCES/ sed -i \ - -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/sysconfig/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=/var/lock/subsys/$$NAME|' \ SOURCES/rabbitmq-server.init sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 39d23983..54fd39b7 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -17,53 +17,65 @@ ### END INIT INFO PATH=/sbin:/usr/sbin:/bin:/usr/bin -DAEMON=/usr/sbin/rabbitmq-multi NAME=rabbitmq-server +DAEMON=/usr/sbin/${NAME} +CONTROL=/usr/sbin/rabbitmqctl DESC=rabbitmq-server USER=rabbitmq -NODE_COUNT=1 +TIMEOUT=10 ROTATE_SUFFIX= INIT_LOG_DIR=/var/log/rabbitmq -DEFAULTS_FILE= # This is filled in when building packages LOCK_FILE= # This is filled in when building packages test -x $DAEMON || exit 0 -# Include rabbitmq defaults if available -if [ -f "$DEFAULTS_FILE" ] ; then - . $DEFAULTS_FILE -fi - RETVAL=0 set -e start_rabbitmq () { - set +e - $DAEMON start_all ${NODE_COUNT} > ${INIT_LOG_DIR}/startup_log 2> ${INIT_LOG_DIR}/startup_err - case "$?" 
in - 0) - echo SUCCESS - [ -n "$LOCK_FILE" ] && touch $LOCK_FILE + status_rabbitmq quiet + if [ $RETVAL != 0 ] ; then RETVAL=0 - ;; - 1) - echo TIMEOUT - check ${INIT_LOG_DIR}/startup_\{log,err\} - RETVAL=1 - ;; - *) - echo FAILED - check ${INIT_LOG_DIR}/startup_log, _err + set +e + nohup $DAEMON > ${INIT_LOG_DIR}/startup_log \ + 2> ${INIT_LOG_DIR}/startup_err & + wait_for_rabbitmq + case "$?" in + 0) + echo SUCCESS + [ -n "$LOCK_FILE" ] && touch $LOCK_FILE + RETVAL=0 + ;; + *) + echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\} + RETVAL=1 + ;; + esac + set -e + else + echo RabbitMQ is currently running RETVAL=1 - ;; - esac - set -e + fi +} + +wait_for_rabbitmq() { + WAITED=0 + while [ $WAITED != $TIMEOUT ]; do + if status_rabbitmq quiet ; then + return 0 + fi + sleep 1 + WAITED=`expr $WAITED + 1` + done + return 1 } stop_rabbitmq () { set +e status_rabbitmq quiet if [ $RETVAL = 0 ] ; then - $DAEMON stop_all > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err + $CONTROL stop > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err RETVAL=$? if [ $RETVAL = 0 ] ; then [ -n "$LOCK_FILE" ] && rm -rf $LOCK_FILE @@ -71,7 +83,7 @@ stop_rabbitmq () { echo FAILED - check ${INIT_LOG_DIR}/shutdown_log, _err fi else - echo No nodes running + echo RabbitMQ is not running RETVAL=0 fi set -e @@ -80,9 +92,9 @@ stop_rabbitmq () { status_rabbitmq() { set +e if [ "$1" != "quiet" ] ; then - $DAEMON status 2>&1 + $CONTROL status 2>&1 else - $DAEMON status > /dev/null 2>&1 + $CONTROL status > /dev/null 2>&1 fi if [ $? != 0 ] ; then RETVAL=1 diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile index ab05f732..d937fbb2 100644 --- a/packaging/debs/Debian/Makefile +++ b/packaging/debs/Debian/Makefile @@ -23,7 +23,6 @@ package: clean cp -r debian $(UNPACKED_DIR) cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/ sed -i \ - -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/default/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=|' \ $(UNPACKED_DIR)/debian/rabbitmq-server.init sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ -- cgit v1.2.1 From f1c51c529c28d73cea4bcee4625303fffb886051 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 24 Jan 2011 13:57:24 +0000 Subject: Untested changes to the OCF script. 
--- packaging/common/rabbitmq-server.ocf | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index b969535a..4e7df55e 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -35,7 +35,7 @@ ## ## OCF instance parameters -## OCF_RESKEY_multi +## OCF_RESKEY_server ## OCF_RESKEY_ctl ## OCF_RESKEY_nodename ## OCF_RESKEY_ip @@ -53,11 +53,11 @@ ####################################################################### -OCF_RESKEY_multi_default="/usr/sbin/rabbitmq-multi" +OCF_RESKEY_server_default="/usr/sbin/rabbitmq-server" OCF_RESKEY_ctl_default="/usr/sbin/rabbitmqctl" OCF_RESKEY_nodename_default="rabbit@localhost" OCF_RESKEY_log_base_default="/var/log/rabbitmq" -: ${OCF_RESKEY_multi=${OCF_RESKEY_multi_default}} +: ${OCF_RESKEY_server=${OCF_RESKEY_server_default}} : ${OCF_RESKEY_ctl=${OCF_RESKEY_ctl_default}} : ${OCF_RESKEY_nodename=${OCF_RESKEY_nodename_default}} : ${OCF_RESKEY_log_base=${OCF_RESKEY_log_base_default}} @@ -76,12 +76,12 @@ Resource agent for RabbitMQ-server Resource agent for RabbitMQ-server - + -The path to the rabbitmq-multi script +The path to the rabbitmq-server script -Path to rabbitmq-multi - +Path to rabbitmq-server + @@ -170,7 +170,7 @@ Expects to have a fully populated OCF RA-compliant environment set. END } -RABBITMQ_MULTI=$OCF_RESKEY_multi +RABBITMQ_SERVER=$OCF_RESKEY_server RABBITMQ_CTL=$OCF_RESKEY_ctl RABBITMQ_NODENAME=$OCF_RESKEY_nodename RABBITMQ_NODE_IP_ADDRESS=$OCF_RESKEY_ip @@ -192,8 +192,8 @@ export_vars() { } rabbit_validate_partial() { - if [ ! -x $RABBITMQ_MULTI ]; then - ocf_log err "rabbitmq-server multi $RABBITMQ_MULTI does not exist or is not executable"; + if [ ! -x $RABBITMQ_SERVER ]; then + ocf_log err "rabbitmq-server server $RABBITMQ_SERVER does not exist or is not executable"; exit $OCF_ERR_INSTALLED; fi @@ -253,11 +253,11 @@ rabbit_start() { export_vars - $RABBITMQ_MULTI start_all 1 > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err & + nohup $RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err & rc=$? if [ "$rc" != 0 ]; then - ocf_log err "rabbitmq-server start command failed: $RABBITMQ_MULTI start_all 1, $rc" + ocf_log err "rabbitmq-server start command failed: $RABBITMQ_SERVER, $rc" return $rc fi @@ -287,11 +287,11 @@ rabbit_stop() { return $OCF_SUCCESS fi - $RABBITMQ_MULTI stop_all & + $RABBITMQ_CTL stop rc=$? if [ "$rc" != 0 ]; then - ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_MULTI stop_all, $rc" + ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_CTL stop, $rc" return $rc fi -- cgit v1.2.1 From d887a84c64321582266051b9a26ac9a9f1d1f6f7 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 24 Jan 2011 17:40:26 +0000 Subject: Treat sender-specified destinations as routing keys rather than queue names --- src/rabbit_exchange.erl | 13 +++++-------- src/rabbit_exchange_type_direct.erl | 10 +++++----- src/rabbit_exchange_type_fanout.erl | 10 ++-------- src/rabbit_exchange_type_topic.erl | 15 ++++++++++----- 4 files changed, 22 insertions(+), 26 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 24079d22..a94e57f8 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). -export([callback/3]). 
--export([header_routes/2]). +-export([header_routes/1]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -89,8 +89,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). --spec(header_routes/2 :: (rabbit_framing:amqp_table(), rabbit_types:vhost()) -> - [rabbit_types:r('queue')]). +-spec(header_routes/1 :: (rabbit_framing:amqp_table()) -> [binary()]). -endif. %%---------------------------------------------------------------------------- @@ -326,12 +325,10 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. -header_routes(undefined, _VHost) -> +header_routes(undefined) -> []; -header_routes(Headers, VHost) -> - [rabbit_misc:r(VHost, queue, RKey) || - RKey <- lists:flatten([routing_keys(Headers, Header) || - Header <- ?ROUTING_HEADERS])]. +header_routes(Headers) -> + lists:flatten([routing_keys(Headers, Header) || Header <- ?ROUTING_HEADERS]). routing_keys(HeadersTable, Key) -> case rabbit_misc:table_lookup(HeadersTable, Key) of diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index ade57451..97988381 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -51,13 +51,13 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. -route(#exchange{name = #resource{virtual_host = VHost} = Name}, +route(#exchange{name = Name}, #delivery{message = #basic_message{routing_key = RoutingKey, content = Content}}) -> - BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), - HeaderRoutes = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers, VHost), - BindingRoutes ++ HeaderRoutes. + HeaderKeys = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers), + lists:flatten([rabbit_router:match_routing_key(Name, RKey) || + RKey <- [RoutingKey | HeaderKeys]]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index f3716141..5266dd87 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -31,7 +31,6 @@ -module(rabbit_exchange_type_fanout). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -51,13 +50,8 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. -route(#exchange{name = #resource{virtual_host = VHost} = Name}, - #delivery{message = #basic_message{content = Content}}) -> - BindingRoutes = rabbit_router:match_routing_key(Name, '_'), - HeaderRoutes = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers, VHost), - BindingRoutes ++ HeaderRoutes. - +route(#exchange{name = Name}, _Delivery) -> + rabbit_router:match_routing_key(Name, '_'). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2f0d47a7..8f3c0550 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,6 +30,7 @@ %% -module(rabbit_exchange_type_topic). +-include("rabbit_framing.hrl"). -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). 
@@ -59,11 +60,15 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey}}) -> - rabbit_router:match_bindings(Name, - fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RoutingKey) - end). + #delivery{message = #basic_message{routing_key = RoutingKey, + content = Content}}) -> + HeaderKeys = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers), + lists:flatten([rabbit_router:match_bindings( + Name, + fun (#binding{key = BindingKey}) -> + topic_matches(BindingKey, RKey) + end) || RKey <- [RoutingKey | HeaderKeys]]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). -- cgit v1.2.1 From 5f6b9f8881f55d67775df4db00cb513a037d649d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 26 Jan 2011 12:32:34 +0000 Subject: Change the new version format from: [{local, [...]}, {mnesia, [...]}]. to: [{rabbit, [{local, [...]}, {mnesia, [...]}]}]. This is to allow for future work allowing plugins to own upgrades (that can be ignored if the plugin is uninstalled), without having to change the format *again*. --- src/rabbit_upgrade.erl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b222845d..f279029a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -163,7 +163,8 @@ read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; - true -> {ok, V} + true -> [{rabbit, RV}] = V, + {ok, RV} end; {error, _} = Err -> Err end. @@ -175,13 +176,14 @@ read_version(Scope) -> end. write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), + ok = rabbit_misc:write_term_file(schema_filename(), + [[{rabbit, desired_version()}]]), ok. write_version(Scope) -> {ok, V0} = read_version(), V = orddict:store(Scope, desired_version(Scope), V0), - ok = rabbit_misc:write_term_file(schema_filename(), [V]), + ok = rabbit_misc:write_term_file(schema_filename(), [[{rabbit, V}]]), ok. desired_version() -> -- cgit v1.2.1 From cfca23b81c44262977f879ec53e6bfac0792c8b8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 26 Jan 2011 16:12:13 +0000 Subject: rabbitmqctl status is not adequate to wait for the server as it can return successfully when the vm has started but not the app. The app can then fail. Therefore introduce a new command to wait for the app to start. Note that this subcommand contains a timeout to wait for the VM to start, but will wait indefinitely for the app to start once the VM has. --- docs/rabbitmqctl.1.xml | 22 ++++++++++++++++++++++ packaging/common/rabbitmq-server.init | 15 +-------------- src/rabbit_control.erl | 26 +++++++++++++++++++++++++- 3 files changed, 48 insertions(+), 15 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index bd9fee7d..5c090e5a 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -157,6 +157,28 @@ + + wait + + + Wait for the RabbitMQ application to start. + + + This command will wait for the RabbitMQ application to + start at the node. As long as the Erlang node is up but + the RabbitMQ application is down it will wait + indefinitely. If the node itself goes down, or takes too + long to come up, it will fail. + + For example: + rabbitmqctl wait + + This command will return when the RabbitMQ node has + started up. 
+ + + + status diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 54fd39b7..8ef1000b 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -22,7 +22,6 @@ DAEMON=/usr/sbin/${NAME} CONTROL=/usr/sbin/rabbitmqctl DESC=rabbitmq-server USER=rabbitmq -TIMEOUT=10 ROTATE_SUFFIX= INIT_LOG_DIR=/var/log/rabbitmq @@ -40,7 +39,7 @@ start_rabbitmq () { set +e nohup $DAEMON > ${INIT_LOG_DIR}/startup_log \ 2> ${INIT_LOG_DIR}/startup_err & - wait_for_rabbitmq + $CONTROL wait >/dev/null 2>&1 case "$?" in 0) echo SUCCESS @@ -59,18 +58,6 @@ start_rabbitmq () { fi } -wait_for_rabbitmq() { - WAITED=0 - while [ $WAITED != $TIMEOUT ]; do - if status_rabbitmq quiet ; then - return 0 - fi - sleep 1 - WAITED=`expr $WAITED + 1` - done - return 1 -} - stop_rabbitmq () { set +e status_rabbitmq quiet diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 80483097..a7d07b0f 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -20,6 +20,7 @@ -export([start/0, stop/0, action/5, diagnostics/1]). -define(RPC_TIMEOUT, infinity). +-define(WAIT_FOR_VM_TIMEOUT, 5000). -define(QUIET_OPT, "-q"). -define(NODE_OPT, "-n"). @@ -297,7 +298,30 @@ action(list_permissions, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), display_list(call(Node, {rabbit_auth_backend_internal, - list_vhost_permissions, [VHost]})). + list_vhost_permissions, [VHost]})); + +action(wait, Node, [], _Opts, Inform) -> + Inform("Waiting for ~p", [Node]), + wait_for_application(Node, ?WAIT_FOR_VM_TIMEOUT). + +wait_for_application(_Node, NodeTimeout) when NodeTimeout =< 0 -> + {badrpc, nodedown}; + +wait_for_application(Node, NodeTimeout) -> + case call(Node, {application, which_applications, []}) of + {badrpc, nodedown} -> wait_for_application0(Node, NodeTimeout - 1000); + {badrpc, _} = E -> E; + Apps -> case proplists:is_defined(rabbit, Apps) of + %% We've seen the node up; if it goes down + %% die immediately. + false -> wait_for_application0(Node, 0); + true -> ok + end + end. + +wait_for_application0(Node, NodeTimeout) -> + timer:sleep(1000), + wait_for_application(Node, NodeTimeout). default_if_empty(List, Default) when is_list(List) -> if List == [] -> -- cgit v1.2.1 From 934688ae55c393bc2ddc693cd1e141e5cd761fa4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 26 Jan 2011 16:28:10 +0000 Subject: Treat all {badrpc, _}s the same. Use which_applications/1. --- src/rabbit_control.erl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index a7d07b0f..8a19dcfb 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -304,19 +304,19 @@ action(wait, Node, [], _Opts, Inform) -> Inform("Waiting for ~p", [Node]), wait_for_application(Node, ?WAIT_FOR_VM_TIMEOUT). -wait_for_application(_Node, NodeTimeout) when NodeTimeout =< 0 -> - {badrpc, nodedown}; - wait_for_application(Node, NodeTimeout) -> - case call(Node, {application, which_applications, []}) of - {badrpc, nodedown} -> wait_for_application0(Node, NodeTimeout - 1000); - {badrpc, _} = E -> E; - Apps -> case proplists:is_defined(rabbit, Apps) of - %% We've seen the node up; if it goes down - %% die immediately. 
- false -> wait_for_application0(Node, 0); - true -> ok - end + case call(Node, {application, which_applications, [infinity]}) of + {badrpc, _} = E -> NewTimeout = NodeTimeout - 1000, + case NewTimeout =< 0 of + true -> E; + false -> wait_for_application0(Node, NewTimeout) + end; + Apps -> case proplists:is_defined(rabbit, Apps) of + %% We've seen the node up; if it goes down + %% die immediately. + true -> ok; + false -> wait_for_application0(Node, 0) + end end. wait_for_application0(Node, NodeTimeout) -> -- cgit v1.2.1 From 19d9256812c4d8f51d87f96f0dc3c2b04e902d53 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 26 Jan 2011 16:39:08 +0000 Subject: Oops. --- src/rabbit_control.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8a19dcfb..a8903102 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -305,7 +305,7 @@ action(wait, Node, [], _Opts, Inform) -> wait_for_application(Node, ?WAIT_FOR_VM_TIMEOUT). wait_for_application(Node, NodeTimeout) -> - case call(Node, {application, which_applications, [infinity]}) of + case rpc_call(Node, application, which_applications, [infinity]) of {badrpc, _} = E -> NewTimeout = NodeTimeout - 1000, case NewTimeout =< 0 of true -> E; -- cgit v1.2.1 From 8ce0db3120fa014cba473d6fef42b0679d8da795 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 31 Jan 2011 16:40:03 +0000 Subject: Fix the OCF script. Man, that was painful. --- packaging/common/rabbitmq-server.ocf | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index e0381712..07c7b7ca 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -210,8 +210,18 @@ rabbit_validate_full() { } rabbit_status() { + rabbitmqctl_action "status" +} + +rabbit_wait() { + rabbitmqctl_action "wait" +} + +rabbitmqctl_action() { local rc - $RABBITMQ_CTL $NODENAME_ARG status > /dev/null 2> /dev/null + local action + action=$1 + $RABBITMQ_CTL $NODENAME_ARG $action > /dev/null 2> /dev/null rc=$? case "$rc" in 0) @@ -223,7 +233,7 @@ rabbit_status() { return $OCF_NOT_RUNNING ;; *) - ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG status: $rc" + ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG $action: $rc" exit $OCF_ERR_GENERIC esac } @@ -248,18 +258,12 @@ rabbit_start() { # Spin waiting for the server to come up. # Let the CRM/LRM time us out if required - start_wait=1 - while [ $start_wait = 1 ]; do - rabbit_status - rc=$? - if [ "$rc" = $OCF_SUCCESS ]; then - start_wait=0 - elif [ "$rc" != $OCF_NOT_RUNNING ]; then - ocf_log info "rabbitmq-server start failed: $rc" - exit $OCF_ERR_GENERIC - fi - sleep 1 - done + rabbit_wait + rc=$? 
+ if [ "$rc" != $OCF_SUCCESS ]; then + ocf_log info "rabbitmq-server start failed: $rc" + exit $OCF_ERR_GENERIC + fi return $OCF_SUCCESS } -- cgit v1.2.1 From 8304d8f8a8618b6e3aae73c18b4b2594d62fd67a Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 2 Feb 2011 13:41:24 +0000 Subject: Refactored sender-supplied routing keys --- include/rabbit.hrl | 2 +- src/rabbit_basic.erl | 69 +++++++++++++++++++++++++++---------- src/rabbit_channel.erl | 18 +--------- src/rabbit_exchange.erl | 60 +++++++------------------------- src/rabbit_exchange_type_direct.erl | 45 +++++++----------------- src/rabbit_exchange_type_topic.erl | 8 ++--- src/rabbit_router.erl | 59 +++++++------------------------ 7 files changed, 93 insertions(+), 168 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 5c5fad76..a8b326be 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -72,7 +72,7 @@ -record(listener, {node, protocol, host, ip_address, port}). -record(basic_message, {exchange_name, routing_key, content, guid, - is_persistent}). + is_persistent, route_list = []}). -record(ssl_socket, {tcp, ssl}). -record(delivery, {mandatory, immediate, txn, sender, message, diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 1ac39b65..c9d4808c 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -33,10 +33,9 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([publish/1, message/4, properties/1, delivery/5]). +-export([publish/1, message/3, message/4, properties/1, delivery/5]). -export([publish/4, publish/7]). -export([build_content/2, from_content/1]). --export([is_message_persistent/1]). %%---------------------------------------------------------------------------- @@ -56,8 +55,10 @@ rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> - (rabbit_types:message() | rabbit_types:error(any()))). + properties_input(), binary()) -> rabbit_types:message()). +-spec(message/3 :: + (rabbit_exchange:name(), rabbit_router:routing_key(), + rabbit_types:decoded_content()) -> rabbit_types:message()). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -71,9 +72,6 @@ rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> {rabbit_framing:amqp_property_record(), binary()}). --spec(is_message_persistent/1 :: (rabbit_types:decoded_content()) -> - (boolean() | - {'invalid', non_neg_integer()})). -endif. @@ -113,19 +111,33 @@ from_content(Content) -> rabbit_framing_amqp_0_9_1:method_id('basic.publish'), {Props, list_to_binary(lists:reverse(FragmentsRev))}. +%% This breaks the spec rule forbidding message modification +strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, + Key) when Headers =/= undefined -> + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + DecodedContent#content{ + properties_bin = none, + properties = Props#'P_basic'{headers = Headers0}} + end; +strip_header(DecodedContent, _Key) -> + DecodedContent. + +message(ExchangeName, RoutingKey, + #content{properties = Props} = DecodedContent) -> + #basic_message{ + exchange_name = ExchangeName, + routing_key = RoutingKey, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + route_list = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. 
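%% Illustrative sketch (not part of the changeset; the function name is
%% hypothetical). It assumes a module that includes rabbit.hrl and
%% rabbit_framing.hrl, a running broker node (rabbit_guid must be available),
%% and that <<"CC">> is one of the ?ROUTING_HEADERS; note that a later commit
%% in this series renames route_list to routing_keys.
cc_routes_example() ->
    XName   = rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>),
    Headers = [{<<"CC">>, array, [{longstr, <<"key2">>},
                                  {longstr, <<"key3">>}]}],
    Content = rabbit_basic:build_content(#'P_basic'{headers = Headers},
                                         <<"payload">>),
    Msg     = rabbit_basic:message(XName, <<"key1">>, Content),
    %% the CC values are collected alongside the basic.publish routing key
    %% (sorted here because the collection order is an implementation detail)
    [<<"key1">>, <<"key2">>, <<"key3">>] =
        lists:sort(Msg#basic_message.route_list),
    ok.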
+ message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> Properties = properties(RawProperties), Content = build_content(Properties, BodyBin), - case is_message_persistent(Content) of - {invalid, Other} -> - {error, {invalid_delivery_mode, Other}}; - IsPersistent when is_boolean(IsPersistent) -> - #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKeyBin, - content = Content, - guid = rabbit_guid:guid(), - is_persistent = IsPersistent} - end. + message(ExchangeName, RoutingKeyBin, Content). properties(P = #'P_basic'{}) -> P; @@ -167,5 +179,26 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - Other -> {invalid, Other} + Other -> rabbit_log:warning("Unknown delivery mode ~p - " + "treating as 1, non-persistent~n", + [Other]), + false end. + +% Extract CC routes from headers +header_routes(undefined) -> + []; +header_routes(HeadersTable) -> + lists:flatten([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {longstr, Route} -> Route; + {array, Routes} -> rkeys(Routes, []); + _ -> [] + end || HeaderKey <- ?ROUTING_HEADERS]). + +rkeys([{longstr, Route} | Rest], RKeys) -> + rkeys(Rest, [Route | RKeys]); +rkeys([_ | Rest], RKeys) -> + rkeys(Rest, RKeys); +rkeys(_, RKeys) -> + RKeys. + diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5c900b0b..e818dd54 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -527,18 +527,13 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, %% certain to want to look at delivery-mode and priority. DecodedContent = rabbit_binary_parser:ensure_content_decoded(Content), check_user_id_header(DecodedContent#content.properties, State), - IsPersistent = is_message_persistent(DecodedContent), {MsgSeqNo, State1} = case ConfirmEnabled of false -> {undefined, State}; true -> SeqNo = State#ch.publish_seqno, {SeqNo, State#ch{publish_seqno = SeqNo + 1}} end, - Message = #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, - content = DecodedContent, - guid = rabbit_guid:guid(), - is_persistent = IsPersistent}, + Message = rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -1200,17 +1195,6 @@ notify_limiter(LimiterPid, Acked) -> Count -> rabbit_limiter:ack(LimiterPid, Count) end. -is_message_persistent(Content) -> - case rabbit_basic:is_message_persistent(Content) of - {invalid, Other} -> - rabbit_log:warning("Unknown delivery mode ~p - " - "treating as 1, non-persistent~n", - [Other]), - false; - IsPersistent when is_boolean(IsPersistent) -> - IsPersistent - end. - process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> ok = basic_return(Message, State#ch.writer_pid, no_route), send_confirms([MsgSeqNo], State); diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a94e57f8..92259195 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_exchange). @@ -36,7 +21,6 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). -export([callback/3]). --export([header_routes/1]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -89,7 +73,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). --spec(header_routes/1 :: (rabbit_framing:amqp_table()) -> [binary()]). + -endif. %%---------------------------------------------------------------------------- @@ -324,23 +308,3 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. - -header_routes(undefined) -> - []; -header_routes(Headers) -> - lists:flatten([routing_keys(Headers, Header) || Header <- ?ROUTING_HEADERS]). - -routing_keys(HeadersTable, Key) -> - case rabbit_misc:table_lookup(HeadersTable, Key) of - {longstr, Route} -> [Route]; - {array, Routes} -> rkeys(Routes, []); - _ -> [] - end. - -rkeys([{longstr, BinVal} | Rest], RKeys) -> - rkeys(Rest, [BinVal | RKeys]); -rkeys([{_, _} | Rest], RKeys) -> - rkeys(Rest, RKeys); -rkeys(_, RKeys) -> - RKeys. - diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 97988381..0baac1f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -1,37 +1,21 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. 
You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_exchange_type_direct). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -52,12 +36,9 @@ description() -> {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey, - content = Content}}) -> - HeaderKeys = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers), + #delivery{message = #basic_message{route_list = Routes}}) -> lists:flatten([rabbit_router:match_routing_key(Name, RKey) || - RKey <- [RoutingKey | HeaderKeys]]). + RKey <- Routes]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 8f3c0550..97cf8ecf 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,7 +30,6 @@ %% -module(rabbit_exchange_type_topic). --include("rabbit_framing.hrl"). -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). @@ -60,15 +59,12 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey, - content = Content}}) -> - HeaderKeys = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers), + #delivery{message = #basic_message{route_list = Routes}}) -> lists:flatten([rabbit_router:match_bindings( Name, fun (#binding{key = BindingKey}) -> topic_matches(BindingKey, RKey) - end) || RKey <- [RoutingKey | HeaderKeys]]). + end) || RKey <- Routes]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). 
diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 7f9b823e..692d2473 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -1,38 +1,22 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_router). -include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -export([deliver/2, match_bindings/2, match_routing_key/2]). @@ -69,39 +53,22 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), delegate:invoke_no_result( - QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), + QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, Delivery) end), {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), {Success, _} = delegate:invoke(QPids, fun (Pid) -> - rabbit_amqqueue:deliver(Pid, ModifiedDelivery) + rabbit_amqqueue:deliver(Pid, Delivery) end), {Routed, Handled} = lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). 
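%% Descriptive note (annotation, not part of the changeset): the removal below
%% is the counterpart of the earlier rabbit_basic change. The ?DELETED_HEADER
%% header is now stripped once, when the message is created in
%% rabbit_basic:message/3, so deliver/2 can pass the delivery to the queue
%% processes unmodified.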
-%% This breaks the spec rule forbidding message modification -strip_header(Delivery = #delivery{message = Message = #basic_message{ - content = Content = #content{ - properties = Props = #'P_basic'{headers = Headers}}}}, - Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> Delivery; - Tuple -> Headers0 = lists:delete(Tuple, Headers), - Delivery#delivery{message = Message#basic_message{ - content = Content#content{ - properties_bin = none, - properties = Props#'P_basic'{headers = Headers0}}}} - end; -strip_header(Delivery, _Key) -> - Delivery. %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same source -- cgit v1.2.1 From c6e14cf23bcf5cebe1a9f2c3f44d1669d05cb961 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 4 Feb 2011 13:39:35 +0000 Subject: Treat basic_return immediate/mandatory differently --- src/rabbit_channel.erl | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index f9c3c286..ebd8b15c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1081,12 +1081,11 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, basic_return(#basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}, - State, Reason) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), + WriterPid, Reason) -> {_Close, ReplyCode, ReplyText} = rabbit_framing_amqp_0_9_1:lookup_amqp_exception(Reason), ok = rabbit_writer:send_command( - State#ch.writer_pid, + WriterPid, #'basic.return'{reply_code = ReplyCode, reply_text = ReplyText, exchange = ExchangeName#resource.name, @@ -1240,11 +1239,17 @@ is_message_persistent(Content) -> IsPersistent end. -process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> +process_routing_result(unroutable, _, XName, MsgSeqNo, + Msg = #basic_message{exchange_name = ExchangeName}, + State) -> ok = basic_return(Msg, State#ch.writer_pid, no_route), + maybe_incr_stats([{ExchangeName, 1}], return_unroutable, State), record_confirm(MsgSeqNo, XName, State); -process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> +process_routing_result(not_delivered, _, XName, MsgSeqNo, + Msg = #basic_message{exchange_name = ExchangeName}, + State) -> ok = basic_return(Msg, State#ch.writer_pid, no_consumers), + maybe_incr_stats([{ExchangeName, 1}], return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From cd64ab0f9b9fe0689a74681fed4e65d7ce333b8f Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 4 Feb 2011 13:42:51 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ebd8b15c..87357b89 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1239,17 +1239,15 @@ is_message_persistent(Content) -> IsPersistent end. 
-process_routing_result(unroutable, _, XName, MsgSeqNo, - Msg = #basic_message{exchange_name = ExchangeName}, - State) -> +process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State#ch.writer_pid, no_route), - maybe_incr_stats([{ExchangeName, 1}], return_unroutable, State), + maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], + return_unroutable, State), record_confirm(MsgSeqNo, XName, State); -process_routing_result(not_delivered, _, XName, MsgSeqNo, - Msg = #basic_message{exchange_name = ExchangeName}, - State) -> +process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State#ch.writer_pid, no_consumers), - maybe_incr_stats([{ExchangeName, 1}], return_not_delivered, State), + maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], + return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From 5ecfe82f4886dee81d6de41e2811b6ab46c0297c Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 4 Feb 2011 14:18:19 +0000 Subject: Remove redundant try/catch from event notifier --- src/rabbit_event.erl | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 40ade4b7..40651d36 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -130,15 +130,8 @@ notify_if(true, Type, Props) -> notify(Type, Props); notify_if(false, _Type, _Props) -> ok. notify(Type, Props) -> - try - %% TODO: switch to os:timestamp() when we drop support for - %% Erlang/OTP < R13B01 - gen_event:notify(rabbit_event, #event{type = Type, - props = Props, - timestamp = now()}) - catch error:badarg -> - %% badarg means rabbit_event is no longer registered. We never - %% unregister it so the great likelihood is that we're shutting - %% down the broker but some events were backed up. Ignore it. - ok - end. + %% TODO: switch to os:timestamp() when we drop support for + %% Erlang/OTP < R13B01 + gen_event:notify(rabbit_event, #event{type = Type, + props = Props, + timestamp = now()}). -- cgit v1.2.1 From caea05b408f238891410107431b3b0994e02ae66 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Feb 2011 16:05:02 +0000 Subject: Just depend on "erlang". --- packaging/debs/Debian/debian/control | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control index 02da0cc6..b01d38b3 100644 --- a/packaging/debs/Debian/debian/control +++ b/packaging/debs/Debian/debian/control @@ -7,10 +7,7 @@ Standards-Version: 3.8.0 Package: rabbitmq-server Architecture: all -# erlang-inets is not a strict dependency, but it's needed to allow -# the installation of plugins that use mochiweb. Ideally it would be a -# "Recommends" instead, but gdebi does not install those. -Depends: erlang-base (>= 1:12.b.3) | erlang-base-hipe (>= 1:12.b.3), erlang-ssl | erlang-nox (<< 1:13.b-dfsg1-1), erlang-os-mon | erlang-nox (<< 1:13.b-dfsg1-1), erlang-mnesia | erlang-nox (<< 1:13.b-dfsg1-1), erlang-inets | erlang-nox (<< 1:13.b-dfsg1-1), adduser, logrotate, ${misc:Depends} +Depends: erlang (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} Description: An AMQP server written in Erlang RabbitMQ is an implementation of AMQP, the emerging standard for high performance enterprise messaging. 
The RabbitMQ server is a robust and -- cgit v1.2.1 From 79f8240db1539b1bf7e6c3f80d67974449080c22 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Feb 2011 17:28:32 +0000 Subject: formatting --- packaging/common/rabbitmq-server.init | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 8ef1000b..b7101f22 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -48,8 +48,8 @@ start_rabbitmq () { ;; *) echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\} - RETVAL=1 - ;; + RETVAL=1 + ;; esac set -e else -- cgit v1.2.1 From d4eb1cdeddbd33f5e3abaf93743784276df98e43 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Feb 2011 18:44:19 +0000 Subject: Trailing tab and whitespace --- packaging/common/rabbitmq-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index b7101f22..c2652d27 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -99,7 +99,7 @@ rotate_logs_rabbitmq() { } restart_rabbitmq() { - stop_rabbitmq + stop_rabbitmq start_rabbitmq } -- cgit v1.2.1 From 444fcc4b1745d5d16665b56299b1335b23788a2a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 11:48:00 +0000 Subject: Reverse to avoid negated conditional --- packaging/common/rabbitmq-server.init | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index c2652d27..e3f1d13f 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -34,7 +34,10 @@ set -e start_rabbitmq () { status_rabbitmq quiet - if [ $RETVAL != 0 ] ; then + if [ $RETVAL = 0 ] ; then + echo RabbitMQ is currently running + RETVAL=1 + else RETVAL=0 set +e nohup $DAEMON > ${INIT_LOG_DIR}/startup_log \ @@ -52,9 +55,6 @@ start_rabbitmq () { ;; esac set -e - else - echo RabbitMQ is currently running - RETVAL=1 fi } -- cgit v1.2.1 From f315a8348de87819dc3b1fbd1987f94c176e8e01 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 9 Feb 2011 12:01:21 +0000 Subject: Sender-selected destinations - qa feedback --- include/rabbit.hrl | 4 ++-- src/rabbit_basic.erl | 38 +++++++++++++------------------------ src/rabbit_channel.erl | 6 +++--- src/rabbit_exchange_type_direct.erl | 6 +++--- src/rabbit_exchange_type_topic.erl | 12 ++++++------ src/rabbit_types.erl | 2 +- 6 files changed, 28 insertions(+), 40 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 0b6280d1..7bcf021e 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -56,8 +56,8 @@ -record(listener, {node, protocol, host, ip_address, port}). --record(basic_message, {exchange_name, routing_key, content, guid, - is_persistent, route_list = []}). +-record(basic_message, {exchange_name, routing_keys = [], content, guid, + is_persistent}). -record(ssl_socket, {tcp, ssl}). -record(delivery, {mandatory, immediate, txn, sender, message, diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index a144124f..f1348d33 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -97,15 +97,15 @@ from_content(Content) -> {Props, list_to_binary(lists:reverse(FragmentsRev))}. 
%% This breaks the spec rule forbidding message modification -strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, - Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), +strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} + = DecodedContent, Key) when Headers =/= undefined -> + rabbit_binary_generator:clear_encoded_content( + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), DecodedContent#content{ - properties_bin = none, properties = Props#'P_basic'{headers = Headers0}} - end; + end); strip_header(DecodedContent, _Key) -> DecodedContent. @@ -113,11 +113,10 @@ message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> #basic_message{ exchange_name = ExchangeName, - routing_key = RoutingKey, content = strip_header(DecodedContent, ?DELETED_HEADER), guid = rabbit_guid:guid(), is_persistent = is_message_persistent(DecodedContent), - route_list = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. + routing_keys = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> Properties = properties(RawProperties), @@ -164,26 +163,15 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - Other -> rabbit_log:warning("Unknown delivery mode ~p - " - "treating as 1, non-persistent~n", - [Other]), - false + _ -> false end. % Extract CC routes from headers header_routes(undefined) -> []; header_routes(HeadersTable) -> - lists:flatten([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {longstr, Route} -> Route; - {array, Routes} -> rkeys(Routes, []); - _ -> [] - end || HeaderKey <- ?ROUTING_HEADERS]). - -rkeys([{longstr, Route} | Rest], RKeys) -> - rkeys(Rest, [Route | RKeys]); -rkeys([_ | Rest], RKeys) -> - rkeys(Rest, RKeys); -rkeys(_, RKeys) -> - RKeys. + lists:append([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + _ -> [] + end || HeaderKey <- ?ROUTING_HEADERS]). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index be232bd2..16a3911d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -243,7 +243,7 @@ handle_cast({command, Msg}, State = #ch{writer_pid = WriterPid}) -> handle_cast({deliver, ConsumerTag, AckRequired, Msg = {_QName, QPid, _MsgId, Redelivered, #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}}}, State = #ch{writer_pid = WriterPid, next_tag = DeliveryTag}) -> @@ -609,7 +609,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}}} -> State1 = lock_message(not(NoAck), ack_record(DeliveryTag, none, Msg), @@ -1074,7 +1074,7 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, end. 
basic_return(#basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}, WriterPid, Reason) -> {_Close, ReplyCode, ReplyText} = diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 0baac1f8..82776c4a 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -36,9 +36,9 @@ description() -> {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{route_list = Routes}}) -> - lists:flatten([rabbit_router:match_routing_key(Name, RKey) || - RKey <- Routes]). + #delivery{message = #basic_message{routing_keys = Routes}}) -> + lists:append([rabbit_router:match_routing_key(Name, RKey) || + RKey <- Routes]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index beee4974..27251d12 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -44,12 +44,12 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{route_list = Routes}}) -> - lists:flatten([rabbit_router:match_bindings( - Name, - fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RKey) - end) || RKey <- Routes]). + #delivery{message = #basic_message{routing_keys = Routes}}) -> + lists:append([rabbit_router:match_bindings( + Name, + fun (#binding{key = BindingKey}) -> + topic_matches(BindingKey, RKey) + end) || RKey <- Routes]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 3dbe740f..ab2300c0 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -64,7 +64,7 @@ -type(content() :: undecoded_content() | decoded_content()). -type(basic_message() :: #basic_message{exchange_name :: rabbit_exchange:name(), - routing_key :: rabbit_router:routing_key(), + routing_keys :: [rabbit_router:routing_key()], content :: content(), guid :: rabbit_guid:guid(), is_persistent :: boolean()}). -- cgit v1.2.1 From 340ae1fdefe6b7b9558292ca1e7ff43ecde06ac4 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 9 Feb 2011 12:16:20 +0000 Subject: Only clear encoded content when necessary --- src/rabbit_basic.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index f1348d33..5ea145d4 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -99,13 +99,13 @@ from_content(Content) -> %% This breaks the spec rule forbidding message modification strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, Key) when Headers =/= undefined -> - rabbit_binary_generator:clear_encoded_content( - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + rabbit_binary_generator:clear_encoded_content( DecodedContent#content{ - properties = Props#'P_basic'{headers = Headers0}} - end); + properties = Props#'P_basic'{headers = Headers0}}) + end; strip_header(DecodedContent, _Key) -> DecodedContent. 
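For context on the sender-selected destination changes above: routing now works off a routing_keys list rather than a single routing_key, with any extra keys pulled out of the message's routing headers by header_routes/1. A rough sketch, not part of the patch, assuming <<"CC">> and <<"BCC">> are the values behind ?ROUTING_HEADERS and substituting lists:keyfind/3 for rabbit_misc:table_lookup/2; the wrapper name and literal keys are made up for illustration:

    %% Sketch of how a routing header widens the routing_keys list built by
    %% rabbit_basic:message/3 (assumes <<"CC">>/<<"BCC">> are ?ROUTING_HEADERS).
    header_routes_example() ->
        PrimaryKey = <<"k1">>,
        Headers = [{<<"CC">>, array, [{longstr, <<"k2">>}, {longstr, <<"k3">>}]}],
        CcRoutes = lists:append(
                     [case lists:keyfind(Key, 1, Headers) of
                          {_, array, Routes} -> [R || {longstr, R} <- Routes];
                          _                  -> []
                      end || Key <- [<<"CC">>, <<"BCC">>]]),
        [PrimaryKey | CcRoutes].  %% yields [<<"k1">>, <<"k2">>, <<"k3">>]

Each exchange type then appends the matches for every key in that list, as in the rabbit_exchange_type_direct and rabbit_exchange_type_topic hunks above.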
-- cgit v1.2.1 From 2deb9a58ebc6bf078af6c136aa2f4a5f5a105103 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 12:50:08 +0000 Subject: Remove pointless test, fix comment. --- packaging/common/rabbitmq-server.ocf | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index 07c7b7ca..f7322659 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -249,14 +249,8 @@ rabbit_start() { export_vars nohup $RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err & - rc=$? - - if [ "$rc" != 0 ]; then - ocf_log err "rabbitmq-server start command failed: $RABBITMQ_SERVER, $rc" - return $rc - fi - # Spin waiting for the server to come up. + # Wait for the server to come up. # Let the CRM/LRM time us out if required rabbit_wait rc=$? -- cgit v1.2.1 From d11305029e2d5d587b14e22cc5f04d957aa8777c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 15:59:17 +0000 Subject: Use setsid rather than nohup. --- packaging/common/rabbitmq-server.init | 4 ++-- packaging/common/rabbitmq-server.ocf | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index e3f1d13f..aa34fabd 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -40,8 +40,8 @@ start_rabbitmq () { else RETVAL=0 set +e - nohup $DAEMON > ${INIT_LOG_DIR}/startup_log \ - 2> ${INIT_LOG_DIR}/startup_err & + setsid sh -c "$DAEMON > ${INIT_LOG_DIR}/startup_log \ + 2> ${INIT_LOG_DIR}/startup_err" & $CONTROL wait >/dev/null 2>&1 case "$?" in 0) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index f7322659..94999d0e 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -248,7 +248,7 @@ rabbit_start() { export_vars - nohup $RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err & + setsid sh -c "$RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err" & # Wait for the server to come up. # Let the CRM/LRM time us out if required -- cgit v1.2.1 From 3f2244546b15181c20fef174ec2d6fdd0d192221 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 16:25:51 +0000 Subject: Correct the behaviour of /etc/init.d/rabbitmq-server status --- docs/rabbitmqctl.1.xml | 18 ++++++++++++++++++ packaging/common/rabbitmq-server.init | 4 ++-- src/rabbit_control.erl | 14 +++++++++++++- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 5c090e5a..2f2e8b41 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -197,6 +197,24 @@ + + init_status + + + Displays an init.d-style status line about whether the server + is running. + + For example: + rabbitmqctl init_status + + This command displays: + rabbit@hostname is running (pid 1234). + or + rabbit@hostname is NOT running. 
+ + + + reset diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index aa34fabd..f5f06887 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -79,9 +79,9 @@ stop_rabbitmq () { status_rabbitmq() { set +e if [ "$1" != "quiet" ] ; then - $CONTROL status 2>&1 + $CONTROL init_status 2>&1 else - $CONTROL status > /dev/null 2>&1 + $CONTROL init_status > /dev/null 2>&1 fi if [ $? != 0 ] ; then RETVAL=1 diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index a8903102..3f7fdf92 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -25,6 +25,7 @@ -define(QUIET_OPT, "-q"). -define(NODE_OPT, "-n"). -define(VHOST_OPT, "-p"). +-define(INHERENTLY_QUIET, [init_status]). %%---------------------------------------------------------------------------- @@ -62,7 +63,8 @@ start() -> end end, Opts), Command = list_to_atom(Command0), - Quiet = proplists:get_bool(?QUIET_OPT, Opts1), + Quiet = proplists:get_bool(?QUIET_OPT, Opts1) + orelse lists:member(Command, ?INHERENTLY_QUIET), Node = proplists:get_value(?NODE_OPT, Opts1), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; @@ -79,6 +81,8 @@ start() -> false -> io:format("...done.~n") end, quit(0); + fail_silent -> + quit(1); {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> print_error("invalid command '~s'", [string:join([atom_to_list(Command) | Args], " ")]), @@ -173,6 +177,14 @@ action(status, Node, [], _Opts, Inform) -> ok end; +action(init_status, Node, [], Opts, _) -> + case call(Node, {os, getpid, []}) of + {badrpc, _} -> io:format("~p is NOT running.", [Node]), + fail_silent; + Res -> io:format("~p is running (pid ~s).", [Node, Res]), + ok + end; + action(rotate_logs, Node, [], _Opts, Inform) -> Inform("Reopening logs for node ~p", [Node]), call(Node, {rabbit, rotate_logs, [""]}); -- cgit v1.2.1 From f3f1540b333309d4f2f74cb0a9520f7ff0d2d5da Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 16:27:48 +0000 Subject: http://refspecs.freestandards.org/LSB_3.1.1/LSB-Core-generic/LSB-Core-generic/iniscrptact.html: 0 program is running or service is OK 1 program is dead and /var/run pid file exists 2 program is dead and /var/lock lock file exists 3 program is not running --- packaging/common/rabbitmq-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index f5f06887..e05cfc1b 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -84,7 +84,7 @@ status_rabbitmq() { $CONTROL init_status > /dev/null 2>&1 fi if [ $? != 0 ] ; then - RETVAL=1 + RETVAL=3 fi set -e } -- cgit v1.2.1 From f4c47ea3cf0264003194a03dafe3ab1c1bf9be9f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 16:52:05 +0000 Subject: Undo 3b4956543c30 --- docs/rabbitmqctl.1.xml | 18 ------------------ packaging/common/rabbitmq-server.init | 4 ++-- src/rabbit_control.erl | 14 +------------- 3 files changed, 3 insertions(+), 33 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 2f2e8b41..5c090e5a 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -197,24 +197,6 @@ - - init_status - - - Displays an init.d-style status line about whether the server - is running. - - For example: - rabbitmqctl init_status - - This command displays: - rabbit@hostname is running (pid 1234). - or - rabbit@hostname is NOT running. 
- - - - reset diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index e05cfc1b..ea21f098 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -79,9 +79,9 @@ stop_rabbitmq () { status_rabbitmq() { set +e if [ "$1" != "quiet" ] ; then - $CONTROL init_status 2>&1 + $CONTROL status 2>&1 else - $CONTROL init_status > /dev/null 2>&1 + $CONTROL status > /dev/null 2>&1 fi if [ $? != 0 ] ; then RETVAL=3 diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 3f7fdf92..a8903102 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -25,7 +25,6 @@ -define(QUIET_OPT, "-q"). -define(NODE_OPT, "-n"). -define(VHOST_OPT, "-p"). --define(INHERENTLY_QUIET, [init_status]). %%---------------------------------------------------------------------------- @@ -63,8 +62,7 @@ start() -> end end, Opts), Command = list_to_atom(Command0), - Quiet = proplists:get_bool(?QUIET_OPT, Opts1) - orelse lists:member(Command, ?INHERENTLY_QUIET), + Quiet = proplists:get_bool(?QUIET_OPT, Opts1), Node = proplists:get_value(?NODE_OPT, Opts1), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; @@ -81,8 +79,6 @@ start() -> false -> io:format("...done.~n") end, quit(0); - fail_silent -> - quit(1); {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> print_error("invalid command '~s'", [string:join([atom_to_list(Command) | Args], " ")]), @@ -177,14 +173,6 @@ action(status, Node, [], _Opts, Inform) -> ok end; -action(init_status, Node, [], Opts, _) -> - case call(Node, {os, getpid, []}) of - {badrpc, _} -> io:format("~p is NOT running.", [Node]), - fail_silent; - Res -> io:format("~p is running (pid ~s).", [Node, Res]), - ok - end; - action(rotate_logs, Node, [], _Opts, Inform) -> Inform("Reopening logs for node ~p", [Node]), call(Node, {rabbit, rotate_logs, [""]}); -- cgit v1.2.1 From 571f68ed98a25c014411f93106b58be010ceb515 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 9 Feb 2011 16:56:18 +0000 Subject: Add pid to rabbitmqctl status. --- src/rabbit.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 67e2e40f..5ff96f90 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -212,7 +212,8 @@ stop_and_halt() -> ok. status() -> - [{running_applications, application:which_applications()}] ++ + [{pid, list_to_integer(os:getpid())}, + {running_applications, application:which_applications()}] ++ rabbit_mnesia:status(). rotate_logs(BinarySuffix) -> -- cgit v1.2.1 From d23a7dd42b8ee932dffdc9f9cb0c286bb3cb4982 Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Thu, 10 Feb 2011 16:32:03 +0000 Subject: Added code to raise or clear alarm "file_descriptor_limit" when transitions between being able to obtain file descriptors (e.g. for sockets) or not, and vice versa, occur. Method adjust_alarm contains the logic to set/clear alarm based on previous and new state. 
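In other words, the alarm tracks transitions of the obtain limit rather than its absolute value. A minimal sketch of that rule, using the same OTP alarm_handler calls as the diff below (which implements it as adjust_alarm/2); the function name here is made up for illustration:

    %% Only the edges matter: raise on false->true, clear on true->false,
    %% and do nothing while staying on either side of the limit.
    maybe_toggle_alarm(WasReached, IsReached) ->
        case {WasReached, IsReached} of
            {false, true} -> alarm_handler:set_alarm({file_descriptor_limit, []});
            {true, false} -> alarm_handler:clear_alarm(file_descriptor_limit);
            _             -> ok
        end.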
--- src/file_handle_cache.erl | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 1e1f37cb..a1b8efc1 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -869,13 +869,13 @@ handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, {noreply, reduce(State #fhc_state { obtain_pending = pending_in(Item, Pending) })}; false -> - {noreply, run_pending_item(Item, State)} + {noreply, adjust_alarm(State, run_pending_item(Item, State))} end; handle_call({set_limit, Limit}, _From, State) -> {reply, ok, maybe_reduce( - process_pending(State #fhc_state { + adjust_alarm(State, process_pending(State #fhc_state { limit = Limit, - obtain_limit = obtain_limit(Limit) }))}; + obtain_limit = obtain_limit(Limit) })))}; handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) -> {reply, Limit, State}. @@ -900,9 +900,9 @@ handle_cast({close, Pid, EldestUnusedSince}, _ -> dict:store(Pid, EldestUnusedSince, Elders) end, ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}), - {noreply, process_pending( + {noreply, adjust_alarm(State, process_pending( update_counts(open, Pid, -1, - State #fhc_state { elders = Elders1 }))}; + State #fhc_state { elders = Elders1 })))}; handle_cast({transfer, FromPid, ToPid}, State) -> ok = track_client(ToPid, State#fhc_state.clients), @@ -924,13 +924,15 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, ets:lookup(Clients, Pid), true = ets:delete(Clients, Pid), FilterFun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end, - {noreply, process_pending( - State #fhc_state { - open_count = OpenCount - Opened, - open_pending = filter_pending(FilterFun, OpenPending), - obtain_count = ObtainCount - Obtained, - obtain_pending = filter_pending(FilterFun, ObtainPending), - elders = dict:erase(Pid, Elders) })}. + {noreply, adjust_alarm( + State, + process_pending( + State #fhc_state { + open_count = OpenCount - Opened, + open_pending = filter_pending(FilterFun, OpenPending), + obtain_count = ObtainCount - Obtained, + obtain_pending = filter_pending(FilterFun, ObtainPending), + elders = dict:erase(Pid, Elders) }))}. terminate(_Reason, State = #fhc_state { clients = Clients }) -> ets:delete(Clients), @@ -990,6 +992,18 @@ obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of OLimit -> OLimit end. +obtain_limit_reached(#fhc_state { obtain_limit = Limit, + obtain_count = Count}) -> + Limit =/= infinity andalso Count >= Limit. + +adjust_alarm(OldState, NewState) -> + case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of + {false, true} -> alarm_handler:set_alarm({file_descriptor_limit, []}); + {true, false} -> alarm_handler:clear_alarm(file_descriptor_limit); + _ -> ok + end, + NewState. + requested({_Kind, _Pid, Requested, _From}) -> Requested. -- cgit v1.2.1 From 2344574821599928d2af80a6026c92278f266a7e Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Fri, 11 Feb 2011 12:07:40 +0000 Subject: Refactored handle_call({obtain, ...}, ...) 
into single headed function --- src/file_handle_cache.erl | 71 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index a1b8efc1..9bb7abd7 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -849,33 +849,60 @@ handle_call({open, Pid, Requested, EldestUnusedSince}, From, false -> {noreply, run_pending_item(Item, State1)} end; -handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, - obtain_count = Count, - obtain_pending = Pending, - clients = Clients }) - when Limit =/= infinity andalso Count >= Limit -> - ok = track_client(Pid, Clients), - true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), - Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, - {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; +%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, +%% obtain_count = Count, +%% obtain_pending = Pending, +%% clients = Clients }) +%% when Limit =/= infinity andalso Count >= Limit -> +%% ok = track_client(Pid, Clients), +%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), +%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, +%% {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; + +%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, +%% obtain_pending = Pending, +%% clients = Clients }) -> +%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, +%% ok = track_client(Pid, Clients), +%% case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of +%% true -> +%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), +%% {noreply, reduce(State #fhc_state { +%% obtain_pending = pending_in(Item, Pending) })}; +%% false -> +%% {noreply, adjust_alarm(State, run_pending_item(Item, State))} +%% end; + handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, obtain_pending = Pending, clients = Clients }) -> - Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, ok = track_client(Pid, Clients), - case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of - true -> - true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), - {noreply, reduce(State #fhc_state { - obtain_pending = pending_in(Item, Pending) })}; - false -> - {noreply, adjust_alarm(State, run_pending_item(Item, State))} - end; + Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, + Enqueue = fun () -> + true = ets:update_element(Clients, Pid, + {#cstate.blocked, true}), + State #fhc_state { + obtain_pending = pending_in(Item, Pending) } + end, + {noreply, + case obtain_limit_reached(State) of + true -> Enqueue(); + false -> case needs_reduce(State #fhc_state { + obtain_count = Count + 1 }) of + true -> reduce(Enqueue()); + false -> adjust_alarm( + State, run_pending_item(Item, State)) + end + end}; + handle_call({set_limit, Limit}, _From, State) -> - {reply, ok, maybe_reduce( - adjust_alarm(State, process_pending(State #fhc_state { - limit = Limit, - obtain_limit = obtain_limit(Limit) })))}; + {reply, ok, adjust_alarm( + State, maybe_reduce( + process_pending( + State #fhc_state { + limit = Limit, + obtain_limit = obtain_limit(Limit) })))}; + handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) -> {reply, Limit, State}. 
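The two previous clause heads collapse into a single obtain clause whose checks run in a fixed order, so only the grant path passes through adjust_alarm/2. A condensed restatement, with illustrative function and atom names that are not part of the patch:

    %% Order of checks in the single-headed obtain clause above: the limit
    %% check wins, then the reduce check, and only the grant path adjusts
    %% the alarm.
    obtain_outcome(LimitReached, NeedsReduce) ->
        case {LimitReached, NeedsReduce} of
            {true,  _}     -> enqueue_blocked;
            {false, true}  -> reduce_then_enqueue;
            {false, false} -> run_item_and_adjust_alarm
        end.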
-- cgit v1.2.1 From f70f04a1cbbc3fb1ba01bd2c4ed28ff764955fb1 Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Fri, 11 Feb 2011 12:11:43 +0000 Subject: remove commented out code --- src/file_handle_cache.erl | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 9bb7abd7..921b1211 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -849,30 +849,6 @@ handle_call({open, Pid, Requested, EldestUnusedSince}, From, false -> {noreply, run_pending_item(Item, State1)} end; -%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, -%% obtain_count = Count, -%% obtain_pending = Pending, -%% clients = Clients }) -%% when Limit =/= infinity andalso Count >= Limit -> -%% ok = track_client(Pid, Clients), -%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), -%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, -%% {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; - -%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, -%% obtain_pending = Pending, -%% clients = Clients }) -> -%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, -%% ok = track_client(Pid, Clients), -%% case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of -%% true -> -%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), -%% {noreply, reduce(State #fhc_state { -%% obtain_pending = pending_in(Item, Pending) })}; -%% false -> -%% {noreply, adjust_alarm(State, run_pending_item(Item, State))} -%% end; - handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, obtain_pending = Pending, clients = Clients }) -> -- cgit v1.2.1 From 99ac15fbc28d60adc0d38899a5a7f770530ca466 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 12:51:50 +0000 Subject: Upgrade messages --- include/rabbit_backing_queue_spec.hrl | 3 ++ src/rabbit.erl | 1 + src/rabbit_msg_file.erl | 68 ++++++++++++++++++++--------------- src/rabbit_msg_store.erl | 62 ++++++++++++++++++++++++++++++++ src/rabbit_upgrade_functions.erl | 19 ++++++++++ src/rabbit_variable_queue.erl | 15 +++++++- 6 files changed, 138 insertions(+), 30 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index accb2c0e..52ffd413 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,3 +65,6 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). +-spec(transform_storage/1 :: + (fun ((binary()) -> (rabbit_types:ok_or_error2(any(), any())))) -> + non_neg_integer()). diff --git a/src/rabbit.erl b/src/rabbit.erl index c6661d39..9e241e80 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -38,6 +38,7 @@ -rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, + {requires, file_handle_cache}, {enables, external_infrastructure}]}). -rabbit_boot_step({file_handle_cache, diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index cfea4982..ad87ee16 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2]). +-export([append/3, read/2, scan/2, scan/3]). 
%%---------------------------------------------------------------------------- @@ -48,6 +48,9 @@ -spec(scan/2 :: (io_device(), file_size()) -> {'ok', [{rabbit_guid:guid(), msg_size(), position()}], position()}). +-spec(scan/3 :: (io_device(), file_size(), + fun ((rabbit_guid:guid(), msg_size(), position(), binary()) -> any())) -> + {'ok', [any()], position()}). -endif. @@ -79,43 +82,50 @@ read(FileHdl, TotalSize) -> KO -> KO end. +scan_fun(Guid, TotalSize, Offset, _Msg) -> + {Guid, TotalSize, Offset}. + scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0). + scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/4). + +scan(FileHdl, FileSize, Fun) when FileSize >= 0 -> + scan(FileHdl, FileSize, <<>>, 0, [], 0, Fun). -scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset) -> +scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> {ok, Acc, ScanOffset}; -scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset) -> +scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset, Fun) -> Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]), case file_handle_cache:read(FileHdl, Read) of {ok, Data1} -> {Data2, Acc1, ScanOffset1} = - scan(<>, Acc, ScanOffset), + scanner(<>, Acc, ScanOffset, Fun), ReadOffset1 = ReadOffset + size(Data1), - scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1); + scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1, Fun); _KO -> {ok, Acc, ScanOffset} end. -scan(<<>>, Acc, Offset) -> - {<<>>, Acc, Offset}; -scan(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset) -> - {<<>>, Acc, Offset}; %% Nothing to do other than stop. -scan(<>, Acc, Offset) -> - TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, - case WriteMarker of - ?WRITE_OK_MARKER -> - %% Here we take option 5 from - %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the Guid as a number, and then convert it - %% back to a binary in order to work around bugs in - %% Erlang's GC. - <> = - <>, - <> = <>, - scan(Rest, [{Guid, TotalSize, Offset} | Acc], Offset + TotalSize); - _ -> - scan(Rest, Acc, Offset + TotalSize) - end; -scan(Data, Acc, Offset) -> - {Data, Acc, Offset}. +scanner(<<>>, Acc, Offset, _Fun) -> + {<<>>, Acc, Offset}; +scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset, _Fun) -> + {<<>>, Acc, Offset}; %% Nothing to do other than stop. +scanner(<>, Acc, Offset, Fun) -> + TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, + case WriteMarker of + ?WRITE_OK_MARKER -> + %% Here we take option 5 from + %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in + %% which we read the Guid as a number, and then convert it + %% back to a binary in order to work around bugs in + %% Erlang's GC. + <> = + <>, + <> = <>, + scanner(Rest, [Fun(Guid, TotalSize, Offset, Msg) | Acc], + Offset + TotalSize, Fun); + _ -> + scanner(Rest, Acc, Offset + TotalSize, Fun) + end; +scanner(Data, Acc, Offset, _Fun) -> + {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index e9c356e1..bd8d61e8 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,16 +26,20 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal +-export([transform_dir/3, force_recovery/2]). %% upgrade + -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). %%---------------------------------------------------------------------------- -include("rabbit_msg_store.hrl"). 
+-include_lib("kernel/include/file.hrl"). -define(SYNC_INTERVAL, 5). %% milliseconds -define(CLEAN_FILENAME, "clean.dot"). -define(FILE_SUMMARY_FILENAME, "file_summary.ets"). +-define(TRANSFORM_TMP, "transform_tmp"). -define(BINARY_MODE, [raw, binary]). -define(READ_MODE, [read]). @@ -160,6 +164,10 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). +-spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). +-spec(transform_dir/3 :: (file:filename(), server(), + fun ((binary())->({'ok', msg()} | {error, any()}))) -> + non_neg_integer()). -endif. @@ -1956,3 +1964,57 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {got, FinalOffsetZ}, {destination, Destination}]} end. + +force_recovery(BaseDir, Server) -> + Dir = filename:join(BaseDir, atom_to_list(Server)), + file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + [file:delete(filename:join(Dir, File)) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], + ok. + +transform_dir(BaseDir, Server, TransformFun) -> + Dir = filename:join(BaseDir, atom_to_list(Server)), + TmpDir = filename:join(Dir, ?TRANSFORM_TMP), + case filelib:is_dir(TmpDir) of + true -> throw({error, previously_failed_transform}); + false -> + Count = lists:sum( + [transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)]), + [file:delete(filename:join(Dir, File)) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], + [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || + File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + [file:delete(filename:join(TmpDir, File)) || + File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + ok = file:del_dir(TmpDir), + Count + end. + +transform_msg_file(FileOld, FileNew, TransformFun) -> + rabbit_misc:ensure_parent_dirs_exist(FileNew), + {ok, #file_info{size=Size}} = file:read_file_info(FileOld), + {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), + {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], + [{write_buffer, + ?HANDLE_CACHE_BUFFER_SIZE}]), + {ok, Acc, Size} = + rabbit_msg_file:scan( + RefOld, Size, + fun(Guid, _Size, _Offset, BinMsg) -> + case TransformFun(BinMsg) of + {ok, MsgNew} -> + rabbit_msg_file:append(RefNew, Guid, MsgNew), + 1; + {error, Reason} -> + error_logger:error_msg("Message transform failed: ~p~n", + [Reason]), + 0 + end + end), + file_handle_cache:close(RefOld), + file_handle_cache:close(RefNew), + lists:sum(Acc). + diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 68b88b3e..f4e27cc8 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -25,6 +25,7 @@ -rabbit_upgrade({add_ip_to_listener, []}). -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). +-rabbit_upgrade({multiple_routing_keys, []}). %% ------------------------------------------------------------------- @@ -35,6 +36,7 @@ -spec(add_ip_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). +-spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -101,3 +103,20 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. 
+ +%%-------------------------------------------------------------------- + +multiple_routing_keys() -> + _UpgradeMsgCount = rabbit_variable_queue:transform_storage( + fun (BinMsg) -> + case binary_to_term(BinMsg) of + {basic_message, ExchangeName, Routing_Key, Content, Guid, + Persistent} -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + _ -> + {error, corrupt_message} + end + end), + ok. + diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7142d560..f2176c0e 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1]). + status/1, transform_storage/1]). -export([start/1, stop/0]). @@ -1801,3 +1801,16 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> push_betas_to_deltas( Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. + +%%---------------------------------------------------------------------------- +%% Upgrading +%%---------------------------------------------------------------------------- + +%% Assumes message store is not running +transform_storage(TransformFun) -> + transform_store(?PERSISTENT_MSG_STORE, TransformFun) + + transform_store(?TRANSIENT_MSG_STORE, TransformFun). + +transform_store(Store, TransformFun) -> + rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), + rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). -- cgit v1.2.1 From 131e0bcdad6b6ecaa82ae807ec033a289c937179 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 17:28:13 +0000 Subject: rabbit_msg_file:scan/4 now looks a bit more like fold Also ignore garbage at the end of a message store --- src/rabbit_msg_file.erl | 20 ++++++++++---------- src/rabbit_msg_store.erl | 31 ++++++++++++++----------------- src/rabbit_upgrade_functions.erl | 2 +- src/rabbit_variable_queue.erl | 2 +- 4 files changed, 26 insertions(+), 29 deletions(-) diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index ad87ee16..9d5953d5 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2, scan/3]). +-export([append/3, read/2, scan/2, scan/4]). %%---------------------------------------------------------------------------- @@ -48,9 +48,9 @@ -spec(scan/2 :: (io_device(), file_size()) -> {'ok', [{rabbit_guid:guid(), msg_size(), position()}], position()}). --spec(scan/3 :: (io_device(), file_size(), - fun ((rabbit_guid:guid(), msg_size(), position(), binary()) -> any())) -> - {'ok', [any()], position()}). +-spec(scan/4 :: (io_device(), file_size(), + fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), + A) -> {'ok', A, position()}). -endif. @@ -82,14 +82,14 @@ read(FileHdl, TotalSize) -> KO -> KO end. -scan_fun(Guid, TotalSize, Offset, _Msg) -> - {Guid, TotalSize, Offset}. +scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> + [{Guid, TotalSize, Offset} | Acc]. scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/4). + scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/2). -scan(FileHdl, FileSize, Fun) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, Fun). +scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> + scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). 
scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> {ok, Acc, ScanOffset}; @@ -122,7 +122,7 @@ scanner(<> = <>, <> = <>, - scanner(Rest, [Fun(Guid, TotalSize, Offset, Msg) | Acc], + scanner(Rest, Fun({Guid, TotalSize, Offset, Msg}, Acc), Offset + TotalSize, Fun); _ -> scanner(Rest, Acc, Offset + TotalSize, Fun) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index bd8d61e8..b827eba9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -166,8 +166,7 @@ -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). -spec(transform_dir/3 :: (file:filename(), server(), - fun ((binary())->({'ok', msg()} | {error, any()}))) -> - non_neg_integer()). + fun ((binary()) -> ({'ok', msg()} | {error, any()}))) -> 'ok'). -endif. @@ -1976,21 +1975,19 @@ transform_dir(BaseDir, Server, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Server)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), case filelib:is_dir(TmpDir) of - true -> throw({error, previously_failed_transform}); + true -> throw({error, transform_failed_previously}); false -> - Count = lists:sum( - [transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)]), + [transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], [file:delete(filename:join(Dir, File)) || File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], [file:delete(filename:join(TmpDir, File)) || File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], - ok = file:del_dir(TmpDir), - Count + ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> @@ -2000,21 +1997,21 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), - {ok, Acc, Size} = + {ok, Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, Size, - fun(Guid, _Size, _Offset, BinMsg) -> + fun({Guid, _Size, _Offset, BinMsg}, ok) -> case TransformFun(BinMsg) of {ok, MsgNew} -> - rabbit_msg_file:append(RefNew, Guid, MsgNew), - 1; + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok; {error, Reason} -> error_logger:error_msg("Message transform failed: ~p~n", [Reason]), - 0 + ok end - end), + end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), - lists:sum(Acc). + ok = Acc. 
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index f4e27cc8..73f59557 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -107,7 +107,7 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> %%-------------------------------------------------------------------- multiple_routing_keys() -> - _UpgradeMsgCount = rabbit_variable_queue:transform_storage( + rabbit_variable_queue:transform_storage( fun (BinMsg) -> case binary_to_term(BinMsg) of {basic_message, ExchangeName, Routing_Key, Content, Guid, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index f2176c0e..dee6a8e5 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1808,7 +1808,7 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> %% Assumes message store is not running transform_storage(TransformFun) -> - transform_store(?PERSISTENT_MSG_STORE, TransformFun) + + transform_store(?PERSISTENT_MSG_STORE, TransformFun), transform_store(?TRANSIENT_MSG_STORE, TransformFun). transform_store(Store, TransformFun) -> -- cgit v1.2.1 From a62685c1495b2e95f2e127ab607ec1634a18cc62 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 17:56:09 +0000 Subject: Remove rabbit_msg_file:scan/2 --- src/rabbit_msg_file.erl | 11 +---------- src/rabbit_msg_store.erl | 6 +++++- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 9d5953d5..81f2f07e 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2, scan/4]). +-export([append/3, read/2, scan/4]). %%---------------------------------------------------------------------------- @@ -45,9 +45,6 @@ -spec(read/2 :: (io_device(), msg_size()) -> rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, any())). --spec(scan/2 :: (io_device(), file_size()) -> - {'ok', [{rabbit_guid:guid(), msg_size(), position()}], - position()}). -spec(scan/4 :: (io_device(), file_size(), fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), A) -> {'ok', A, position()}). @@ -82,12 +79,6 @@ read(FileHdl, TotalSize) -> KO -> KO end. -scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> - [{Guid, TotalSize, Offset} | Acc]. - -scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/2). - scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index b827eba9..82fb1735 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1530,7 +1530,8 @@ scan_file_for_valid_messages(Dir, FileName) -> case open_file(Dir, FileName, ?READ_MODE) of {ok, Hdl} -> Valid = rabbit_msg_file:scan( Hdl, filelib:file_size( - form_filename(Dir, FileName))), + form_filename(Dir, FileName)), + fun scan_fun/2, []), %% if something really bad has happened, %% the close could fail, but ignore file_handle_cache:close(Hdl), @@ -1539,6 +1540,9 @@ scan_file_for_valid_messages(Dir, FileName) -> {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} end. +scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> + [{Guid, TotalSize, Offset} | Acc]. + %% Takes the list in *ascending* order (i.e. eldest message %% first). This is the opposite of what scan_file_for_valid_messages %% produces. The list of msgs that is produced is youngest first. 
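With scan/2 gone, rabbit_msg_file:scan/4 is the only scanning entry point and behaves like a fold: the caller supplies a fun over {Guid, TotalSize, Offset, Msg} plus an initial accumulator. A hypothetical caller, for illustration only (the wrapper name is made up; FileHdl is assumed to have been opened via file_handle_cache as in the store code above):

    %% Count the valid messages in an already-open store file of known size.
    count_messages(FileHdl, FileSize) ->
        {ok, Count, _ScanOffset} =
            rabbit_msg_file:scan(FileHdl, FileSize,
                                 fun ({_Guid, _TotalSize, _Offset, _MsgBin}, N) ->
                                         N + 1
                                 end, 0),
        Count.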
-- cgit v1.2.1 From 2d91f7b8e01c19f8e1e81199eb9fedf9ef485333 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sat, 12 Feb 2011 20:59:39 +0000 Subject: Added documentation for gm 'become' callback result --- include/gm_specs.hrl | 2 +- src/gm.erl | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 987866db..2109d15d 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -16,7 +16,7 @@ -ifdef(use_specs). --type(callback_result() :: 'ok' | {'stop', any()}). +-type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}). -type(args() :: [any()]). -type(members() :: [pid()]). diff --git a/src/gm.erl b/src/gm.erl index 8fea9196..283b2431 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -432,6 +432,20 @@ behaviour_info(callbacks) -> [ + %% The joined, members_changed and handle_msg callbacks can all + %% return any of the following terms: + %% + %% 'ok' - the callback function returns normally + %% + %% {'stop', Reason} - the callback indicates the member should + %% stop with reason Reason and should leave the group. + %% + %% {'become', Module, Args} - the callback indicates that the + %% callback module should be changed to Module and that the + %% callback functions should now be passed the arguments + %% Args. This allows the callback module to be dynamically + %% changed. + %% Called when we've successfully joined the group. Supplied with %% Args provided in start_link, plus current group members. {joined, 2}, -- cgit v1.2.1 From 0b4ffb33067b778ebbe30fd2c4b0b9f9160c18c3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:08:43 +0000 Subject: Be explicit where we can be --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f7befebc..e7da6a43 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -373,7 +373,7 @@ init_db(ClusterNodes, Force) -> {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[], _} -> + {[], true} -> %% We're the first node up ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade(local) of -- cgit v1.2.1 From 4b329480306627ea62aa972e432a4a78fab339c4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Feb 2011 13:19:20 +0000 Subject: More explicit documentation. --- docs/rabbitmqctl.1.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 5c090e5a..3550e5ea 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -167,8 +167,8 @@ This command will wait for the RabbitMQ application to start at the node. As long as the Erlang node is up but the RabbitMQ application is down it will wait - indefinitely. If the node itself goes down, or takes too - long to come up, it will fail. + indefinitely. If the node itself goes down, or takes + more than five seconds to come up, it will fail. For example: rabbitmqctl wait -- cgit v1.2.1 From a9d0297ed433b062c985c5622feaa5c7d337c0eb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Feb 2011 13:27:05 +0000 Subject: Make "set +e" cover rather less code. 
--- packaging/common/rabbitmq-server.init | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index ea21f098..21e22eab 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -43,27 +43,28 @@ start_rabbitmq () { setsid sh -c "$DAEMON > ${INIT_LOG_DIR}/startup_log \ 2> ${INIT_LOG_DIR}/startup_err" & $CONTROL wait >/dev/null 2>&1 - case "$?" in + RETVAL=$? + set -e + case "$RETVAL" in 0) echo SUCCESS [ -n "$LOCK_FILE" ] && touch $LOCK_FILE - RETVAL=0 ;; *) echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\} RETVAL=1 ;; esac - set -e fi } stop_rabbitmq () { - set +e status_rabbitmq quiet if [ $RETVAL = 0 ] ; then + set +e $CONTROL stop > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err RETVAL=$? + set -e if [ $RETVAL = 0 ] ; then [ -n "$LOCK_FILE" ] && rm -rf $LOCK_FILE else @@ -73,7 +74,6 @@ stop_rabbitmq () { echo RabbitMQ is not running RETVAL=0 fi - set -e } status_rabbitmq() { -- cgit v1.2.1 From 1c45e13da167b1cc01992521089efda440d29f65 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:39:05 +0000 Subject: Cosmetic --- src/rabbit_upgrade.erl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f279029a..bd3e829c 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -49,19 +49,19 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of - [_|_] = Upgrades -> - case am_i_upgrader(Nodes) of - true -> primary_upgrade(Upgrades, Nodes); - false -> non_primary_upgrade(Nodes) - end; - [] -> - ok; version_not_available -> case Nodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " "< 2.1.1.~n Unfortunately you will need to " "rebuild the cluster.", []) + end; + [] -> + ok; + Upgrades -> + case am_i_upgrader(Nodes) of + true -> primary_upgrade(Upgrades, Nodes); + false -> non_primary_upgrade(Nodes) end end. -- cgit v1.2.1 From 650217882d88c150663e17b8e5a9a8ce4f59f9a4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:59:39 +0000 Subject: inlining --- src/rabbit_upgrade.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index bd3e829c..c8d2ae87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -66,8 +66,7 @@ maybe_upgrade_mnesia() -> end. 
am_i_upgrader(Nodes) -> - Running = nodes_running(Nodes), - case Running of + case nodes_running(Nodes) of [] -> case am_i_disc_node() of true -> true; -- cgit v1.2.1 From 394c73b033ca71d98b0572317852b107abe97a38 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 14 Feb 2011 16:57:16 +0000 Subject: Sender-selected distribution updates --- src/rabbit_basic.erl | 2 +- src/rabbit_msg_store.erl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 5ea145d4..7fa68882 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -101,7 +101,7 @@ strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, Key) when Headers =/= undefined -> case lists:keyfind(Key, 1, Headers) of false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), + Found -> Headers0 = lists:delete(Found, Headers), rabbit_binary_generator:clear_encoded_content( DecodedContent#content{ properties = Props#'P_basic'{headers = Headers0}}) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 82fb1735..f7afbef5 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2001,7 +2001,7 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), - {ok, Acc, _IgnoreSize} = + {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, Size, fun({Guid, _Size, _Offset, BinMsg}, ok) -> @@ -2017,5 +2017,5 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), - ok = Acc. + ok. -- cgit v1.2.1 From afe476dc81752d9708fc07c48390f438989f27b1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 12:29:33 +0000 Subject: Move Protocol to after vhost consistently --- src/rabbit_channel.erl | 12 ++++++------ src/rabbit_channel_sup.erl | 14 +++++++------- src/rabbit_direct.erl | 14 +++++++------- src/rabbit_reader.erl | 4 ++-- src/rabbit_tests.erl | 10 ++++++---- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b9d1baf0..12a668ad 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -67,8 +67,8 @@ -type(channel_number() :: non_neg_integer()). -spec(start_link/8 :: - (rabbit_types:protocol(), channel_number(), pid(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid(), + (channel_number(), pid(), pid(), rabbit_types:user(), + rabbit_types:vhost(), rabbit_types:protocol(), pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). -spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). @@ -95,11 +95,11 @@ %%---------------------------------------------------------------------------- -start_link(Protocol, Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, +start_link(Channel, ReaderPid, WriterPid, User, VHost, Protocol, CollectorPid, StartLimiterFun) -> gen_server2:start_link(?MODULE, - [Protocol, Channel, ReaderPid, WriterPid, User, - VHost, CollectorPid, StartLimiterFun], []). + [Channel, ReaderPid, WriterPid, User, VHost, + Protocol, CollectorPid, StartLimiterFun], []). do(Pid, Method) -> do(Pid, Method, none). 
@@ -153,7 +153,7 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Protocol, Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, +init([Channel, ReaderPid, WriterPid, User, VHost, Protocol, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 9bc0546c..f528a9c6 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -31,11 +31,11 @@ -export_type([start_link_args/0]). -type(start_link_args() :: - {'tcp', rabbit_types:protocol(), rabbit_net:socket(), - rabbit_channel:channel_number(), non_neg_integer(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid()} | - {'direct', rabbit_types:protocol(), rabbit_channel:channel_number(), - pid(), rabbit_types:user(), rabbit_types:vhost(), pid()}). + {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(), + non_neg_integer(), pid(), rabbit_types:user(), rabbit_types:vhost(), + rabbit_types:protocol(), pid()} | + {'direct', rabbit_channel:channel_number(), pid(), rabbit_types:user(), + rabbit_types:vhost(), rabbit_types:protocol(), pid()}). -spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}). @@ -43,7 +43,7 @@ %%---------------------------------------------------------------------------- -start_link({tcp, Protocol, Sock, Channel, FrameMax, ReaderPid, User, VHost, +start_link({tcp, Sock, Channel, FrameMax, ReaderPid, User, VHost, Protocol, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, WriterPid} = @@ -61,7 +61,7 @@ start_link({tcp, Protocol, Sock, Channel, FrameMax, ReaderPid, User, VHost, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Protocol, Channel, ClientChannelPid, User, VHost, +start_link({direct, Channel, ClientChannelPid, User, VHost, Protocol, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 3b8c9fba..8ee7aafc 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/3, start_channel/5]). +-export([boot/0, connect/3, start_channel/6]). -include("rabbit.hrl"). @@ -28,9 +28,9 @@ -spec(connect/3 :: (binary(), binary(), binary()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). --spec(start_channel/5 :: (rabbit_channel:channel_number(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid()) -> - {'ok', pid()}). +-spec(start_channel/6 :: (rabbit_channel:channel_number(), pid(), + rabbit_types:user(), rabbit_types:vhost(), + rabbit_types:protocol(), pid()) -> {'ok', pid()}). -endif. @@ -67,9 +67,9 @@ connect(Username, Password, VHost) -> {error, broker_not_found_on_node} end. -start_channel(Number, ClientChannelPid, User, VHost, Collector) -> +start_channel(Number, ClientChannelPid, User, VHost, Protocol, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( - rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, User, VHost, Collector}]), + rabbit_direct_client_sup, [{direct, Number, ClientChannelPid, User, + VHost, Protocol, Collector}]), {ok, ChannelPid}. 
diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index a9403105..f54d52e5 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -852,8 +852,8 @@ send_to_new_channel(Channel, AnalyzedFrame, State) -> vhost = VHost}} = State, {ok, _ChSupPid, {ChPid, AState}} = rabbit_channel_sup_sup:start_channel( - ChanSupSup, {tcp, Protocol, Sock, Channel, FrameMax, self(), User, - VHost, Collector}), + ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), User, VHost, + Protocol, Collector}), MRef = erlang:monitor(process, ChPid), NewAState = process_channel_frame(AnalyzedFrame, self(), Channel, ChPid, AState), diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 45a11766..f176dee9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1020,8 +1020,9 @@ test_server_status() -> %% create a few things so there is some useful information to list Writer = spawn(fun () -> receive shutdown -> ok end end), {ok, Ch} = rabbit_channel:start_link( - rabbit_framing_amqp_0_9_1, 1, self(), Writer, user(<<"user">>), - <<"/">>, self(), fun (_) -> {ok, self()} end), + 1, self(), Writer, user(<<"user">>), <<"/">>, + rabbit_framing_amqp_0_9_1, self(), + fun (_) -> {ok, self()} end), [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], {new, Queue = #amqqueue{}} <- [rabbit_amqqueue:declare( @@ -1080,8 +1081,9 @@ test_spawn(Receiver) -> Me = self(), Writer = spawn(fun () -> Receiver(Me) end), {ok, Ch} = rabbit_channel:start_link( - rabbit_framing_amqp_0_9_1, 1, Me, Writer, user(<<"guest">>), - <<"/">>, self(), fun (_) -> {ok, self()} end), + 1, Me, Writer, user(<<"guest">>), <<"/">>, + rabbit_framing_amqp_0_9_1, self(), + fun (_) -> {ok, self()} end), ok = rabbit_channel:do(Ch, #'channel.open'{}), receive #'channel.open_ok'{} -> ok after 1000 -> throw(failed_to_receive_channel_open_ok) -- cgit v1.2.1 From 74b1a058173948328d6b1ad7565ab2d82b840848 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 12:36:40 +0000 Subject: /Consistently/... --- src/rabbit_channel_sup.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index f528a9c6..fdaabdfb 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -56,7 +56,7 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, User, VHost, Protocol, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Protocol, Channel, ReaderPid, WriterPid, User, VHost, + [Channel, ReaderPid, WriterPid, User, VHost, Protocol, Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), @@ -68,8 +68,8 @@ start_link({direct, Channel, ClientChannelPid, User, VHost, Protocol, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Protocol, Channel, ClientChannelPid, ClientChannelPid, - User, VHost, Collector, start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, User, + VHost, Protocol, Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. 
-- cgit v1.2.1 From 75a45fa69fd42ba91aaca751102e8f53cfad75f5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 13:51:58 +0000 Subject: 404 => NOT_FOUND --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index f176dee9..9f12be7c 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1307,7 +1307,7 @@ test_queue_cleanup(_SecondaryNode) -> rabbit_channel:do(Ch, #'queue.declare'{ passive = true, queue = ?CLEANUP_QUEUE_NAME }), receive - #'channel.close'{reply_code = 404} -> + #'channel.close'{reply_code = ?NOT_FOUND} -> ok after 2000 -> throw(failed_to_receive_channel_exit) -- cgit v1.2.1 From 43515b69fabeab63c1261b5d36c49274a3413bcc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 15 Feb 2011 15:10:03 +0000 Subject: Make the timeout code a bit clearer. --- src/rabbit_control.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index a8903102..f0b4ced1 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -20,7 +20,7 @@ -export([start/0, stop/0, action/5, diagnostics/1]). -define(RPC_TIMEOUT, infinity). --define(WAIT_FOR_VM_TIMEOUT, 5000). +-define(WAIT_FOR_VM_ATTEMPTS, 5). -define(QUIET_OPT, "-q"). -define(NODE_OPT, "-n"). @@ -302,14 +302,14 @@ action(list_permissions, Node, [], Opts, Inform) -> action(wait, Node, [], _Opts, Inform) -> Inform("Waiting for ~p", [Node]), - wait_for_application(Node, ?WAIT_FOR_VM_TIMEOUT). + wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). -wait_for_application(Node, NodeTimeout) -> +wait_for_application(Node, Attempts) -> case rpc_call(Node, application, which_applications, [infinity]) of - {badrpc, _} = E -> NewTimeout = NodeTimeout - 1000, - case NewTimeout =< 0 of - true -> E; - false -> wait_for_application0(Node, NewTimeout) + {badrpc, _} = E -> NewAttempts = Attempts - 1, + case NewAttempts of + 0 -> E; + _ -> wait_for_application0(Node, NewAttempts) end; Apps -> case proplists:is_defined(rabbit, Apps) of %% We've seen the node up; if it goes down @@ -319,9 +319,9 @@ wait_for_application(Node, NodeTimeout) -> end end. -wait_for_application0(Node, NodeTimeout) -> +wait_for_application0(Node, Attempts) -> timer:sleep(1000), - wait_for_application(Node, NodeTimeout). + wait_for_application(Node, Attempts). default_if_empty(List, Default) when is_list(List) -> if List == [] -> -- cgit v1.2.1 From 17ebfb85ebc28c01dfc29e7089dbbf6d1688bc6c Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 15 Feb 2011 15:27:52 +0000 Subject: Sender-specified distribution updates --- include/rabbit_backing_queue_spec.hrl | 4 +--- src/rabbit_basic.erl | 41 +++++++++++++++++++++------------- src/rabbit_channel.erl | 36 +++++++++++++++++------------- src/rabbit_msg_store.erl | 42 +++++++++++++++++++++++------------ src/rabbit_upgrade_functions.erl | 19 ---------------- src/rabbit_variable_queue.erl | 18 ++++++++++++++- 6 files changed, 93 insertions(+), 67 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 52ffd413..17cdedc2 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,6 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). 
--spec(transform_storage/1 :: - (fun ((binary()) -> (rabbit_types:ok_or_error2(any(), any())))) -> - non_neg_integer()). +-spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 7fa68882..503f01bc 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -31,6 +31,7 @@ -type(publish_result() :: ({ok, rabbit_router:routing_result(), [pid()]} | rabbit_types:error('not_found'))). +-type(msg_or_error() :: {'ok', rabbit_types:message()} | {'error', any()}). -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). @@ -40,10 +41,10 @@ rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> rabbit_types:message()). + properties_input(), binary()) -> msg_or_error()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> rabbit_types:message()). + rabbit_types:decoded_content()) -> msg_or_error()). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -111,17 +112,23 @@ strip_header(DecodedContent, _Key) -> message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> - #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. - -message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> + try + {ok, #basic_message{ + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} + catch + {error, _Reason} = Error -> Error + end. + +message(ExchangeName, RoutingKey, RawProperties, BodyBin) -> Properties = properties(RawProperties), Content = build_content(Properties, BodyBin), - message(ExchangeName, RoutingKeyBin, Content). + {ok, Msg} = message(ExchangeName, RoutingKey, Content), + Msg. properties(P = #'P_basic'{}) -> P; @@ -170,8 +177,12 @@ is_message_persistent(#content{properties = #'P_basic'{ header_routes(undefined) -> []; header_routes(HeadersTable) -> - lists:append([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {array, Routes} -> [Route || {longstr, Route} <- Routes]; - _ -> [] - end || HeaderKey <- ?ROUTING_HEADERS]). + lists:append( + [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + undefined -> []; + {Type, _Val} -> throw({error, {unacceptable_type_in_header, + Type, + binary_to_list(HeaderKey)}}) + end || HeaderKey <- ?ROUTING_HEADERS]). 
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 16a3911d..162580ec 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -555,21 +555,27 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, true -> SeqNo = State#ch.publish_seqno, {SeqNo, State#ch{publish_seqno = SeqNo + 1}} end, - Message = rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent), - {RoutingRes, DeliveredQPids} = - rabbit_exchange:publish( - Exchange, - rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, - MsgSeqNo)), - State2 = process_routing_result(RoutingRes, DeliveredQPids, ExchangeName, - MsgSeqNo, Message, State1), - maybe_incr_stats([{ExchangeName, 1} | - [{{QPid, ExchangeName}, 1} || - QPid <- DeliveredQPids]], publish, State2), - {noreply, case TxnKey of - none -> State2; - _ -> add_tx_participants(DeliveredQPids, State2) - end}; + case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of + {ok, Message} -> + {RoutingRes, DeliveredQPids} = + rabbit_exchange:publish( + Exchange, + rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, + MsgSeqNo)), + State2 = process_routing_result(RoutingRes, DeliveredQPids, + ExchangeName, MsgSeqNo, Message, + State1), + maybe_incr_stats([{ExchangeName, 1} | + [{{QPid, ExchangeName}, 1} || + QPid <- DeliveredQPids]], publish, State2), + {noreply, case TxnKey of + none -> State2; + _ -> add_tx_participants(DeliveredQPids, State2) + end}; + {error, Reason} -> + rabbit_misc:protocol_error(precondition_failed, + "invalid message: ~p", [Reason]) + end; handle_method(#'basic.nack'{delivery_tag = DeliveryTag, multiple = Multiple, diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index f7afbef5..00c2ab18 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1968,29 +1968,43 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. -force_recovery(BaseDir, Server) -> - Dir = filename:join(BaseDir, atom_to_list(Server)), +force_recovery(BaseDir, Store) -> + Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), [file:delete(filename:join(Dir, File)) || File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -transform_dir(BaseDir, Server, TransformFun) -> - Dir = filename:join(BaseDir, atom_to_list(Server)), +for_each_file(Files, Fun) -> + [Fun(File) || File <- Files]. 
+ +transform_dir(BaseDir, Store, TransformFun) -> + Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> - [transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], - [file:delete(filename:join(Dir, File)) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], - [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || - File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], - [file:delete(filename:join(TmpDir, File)) || - File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + for_each_file(OldFileList, + fun (File) -> + transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) + end), + for_each_file(OldFileList, + fun (File) -> + file:delete(filename:join(Dir, File)) + end), + NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), + for_each_file(NewFileList, + fun (File) -> + file:copy(filename:join(TmpDir, File), + filename:join(Dir, File)) + end), + for_each_file(NewFileList, + fun (File) -> + file:delete(filename:join(TmpDir, File)) + end), ok = file:del_dir(TmpDir) end. diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 73f59557..68b88b3e 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -25,7 +25,6 @@ -rabbit_upgrade({add_ip_to_listener, []}). -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). --rabbit_upgrade({multiple_routing_keys, []}). %% ------------------------------------------------------------------- @@ -36,7 +35,6 @@ -spec(add_ip_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). --spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -103,20 +101,3 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. - -%%-------------------------------------------------------------------- - -multiple_routing_keys() -> - rabbit_variable_queue:transform_storage( - fun (BinMsg) -> - case binary_to_term(BinMsg) of - {basic_message, ExchangeName, Routing_Key, Content, Guid, - Persistent} -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - _ -> - {error, corrupt_message} - end - end), - ok. - diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index dee6a8e5..b0781f8f 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, transform_storage/1]). + status/1, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -294,6 +294,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). 
@@ -1806,6 +1808,20 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> %% Upgrading %%---------------------------------------------------------------------------- +multiple_routing_keys() -> + transform_storage( + fun (BinMsg) -> + case binary_to_term(BinMsg) of + {basic_message, ExchangeName, Routing_Key, Content, Guid, + Persistent} -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + _ -> + {error, corrupt_message} + end + end), + ok. + %% Assumes message store is not running transform_storage(TransformFun) -> transform_store(?PERSISTENT_MSG_STORE, TransformFun), -- cgit v1.2.1 From 944de8b5e3aec103afc672666bbf6044e8379016 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 15 Feb 2011 15:56:04 +0000 Subject: Swapped helper function arguments --- src/rabbit_msg_store.erl | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 00c2ab18..a9d1e210 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,7 +1975,7 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(Files, Fun) -> +for_each_file(Fun, Files) -> [Fun(File) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> @@ -1985,26 +1985,22 @@ transform_dir(BaseDir, Store, TransformFun) -> true -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(OldFileList, - fun (File) -> + for_each_file(fun (File) -> transform_msg_file(filename:join(Dir, File), filename:join(TmpDir, File), TransformFun) - end), - for_each_file(OldFileList, - fun (File) -> + end, OldFileList), + for_each_file(fun (File) -> file:delete(filename:join(Dir, File)) - end), + end, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(NewFileList, - fun (File) -> + for_each_file(fun (File) -> file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) - end), - for_each_file(NewFileList, - fun (File) -> + end, NewFileList), + for_each_file(fun (File) -> file:delete(filename:join(TmpDir, File)) - end), + end, NewFileList), ok = file:del_dir(TmpDir) end. -- cgit v1.2.1 From 332bdddf91562609deb1a9eef8fc057d585b5006 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 10:35:25 +0000 Subject: Don't fail if the lock file does not exist (on Debian). --- packaging/common/rabbitmq-server.init | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 21e22eab..091bdf9d 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -48,7 +48,9 @@ start_rabbitmq () { case "$RETVAL" in 0) echo SUCCESS - [ -n "$LOCK_FILE" ] && touch $LOCK_FILE + if [ -n "$LOCK_FILE" ] ; then + touch $LOCK_FILE + fi ;; *) echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\} @@ -66,7 +68,9 @@ stop_rabbitmq () { RETVAL=$? 
set -e if [ $RETVAL = 0 ] ; then - [ -n "$LOCK_FILE" ] && rm -rf $LOCK_FILE + if [ -n "$LOCK_FILE" ] ; then + rm -f $LOCK_FILE + fi else echo FAILED - check ${INIT_LOG_DIR}/shutdown_log, _err fi -- cgit v1.2.1 From ab849ebe461a00d5b794466a99b1144fd1c209d8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 10:42:24 +0000 Subject: According to http://refspecs.freestandards.org/LSB_3.1.1/LSB-Core-generic/LSB-Core-generic/iniscrptact.html this is a success. --- packaging/common/rabbitmq-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 091bdf9d..bf59ca32 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -36,7 +36,7 @@ start_rabbitmq () { status_rabbitmq quiet if [ $RETVAL = 0 ] ; then echo RabbitMQ is currently running - RETVAL=1 + RETVAL=0 else RETVAL=0 set +e -- cgit v1.2.1 From cf199c4ed528fc93faf86964c84f9484d4a769da Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 10:43:34 +0000 Subject: No need to reassign. --- packaging/common/rabbitmq-server.init | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index bf59ca32..c1647dc5 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -36,7 +36,6 @@ start_rabbitmq () { status_rabbitmq quiet if [ $RETVAL = 0 ] ; then echo RabbitMQ is currently running - RETVAL=0 else RETVAL=0 set +e -- cgit v1.2.1 From f300f1594b4224a4c20e1f39a138f3471f6e469e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 11:25:59 +0000 Subject: Shorten transform_dir. --- src/rabbit_msg_store.erl | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a9d1e210..5bfd48fb 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,32 +1975,27 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(Fun, Files) -> - [Fun(File) || File <- Files]. +for_each_file(D) -> + fun(Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files] end. + +for_each_file(D1, D2) -> + fun(Fun, Files) -> [Fun(filename:join(D1, File), + filename:join(D2, File)) || File <- Files] end. 
transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), + TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of - true -> throw({error, transform_failed_previously}); + true -> + throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(fun (File) -> - transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) - end, OldFileList), - for_each_file(fun (File) -> - file:delete(filename:join(Dir, File)) - end, OldFileList), + (for_each_file(Dir, TmpDir))(TransformFile, OldFileList), + (for_each_file(Dir) )(fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(fun (File) -> - file:copy(filename:join(TmpDir, File), - filename:join(Dir, File)) - end, NewFileList), - for_each_file(fun (File) -> - file:delete(filename:join(TmpDir, File)) - end, NewFileList), + (for_each_file(TmpDir, Dir))(fun file:copy/2, NewFileList), + (for_each_file(TmpDir) )(fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. -- cgit v1.2.1 From 789f49a33719c34d11c4385e67e46ac6bc081617 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 11:43:14 +0000 Subject: Matthias points out this does not need to be second order. --- src/rabbit_msg_store.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 5bfd48fb..fd3027e9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,12 +1975,11 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(D) -> - fun(Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files] end. +for_each_file(D, Fun, Files) -> + [Fun(filename:join(D, File)) || File <- Files]. -for_each_file(D1, D2) -> - fun(Fun, Files) -> [Fun(filename:join(D1, File), - filename:join(D2, File)) || File <- Files] end. +for_each_file(D1, D2, Fun, Files) -> + [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), @@ -1991,11 +1990,11 @@ transform_dir(BaseDir, Store, TransformFun) -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - (for_each_file(Dir, TmpDir))(TransformFile, OldFileList), - (for_each_file(Dir) )(fun file:delete/1, OldFileList), + for_each_file(Dir, TmpDir, TransformFile, OldFileList), + for_each_file(Dir, fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - (for_each_file(TmpDir, Dir))(fun file:copy/2, NewFileList), - (for_each_file(TmpDir) )(fun file:delete/1, NewFileList), + for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), + for_each_file(TmpDir, fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. 
-- cgit v1.2.1 From 46b79df30bb8eac1905dfd2cc73e760f753b99f6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 12:03:22 +0000 Subject: Aesthetics --- src/rabbit_msg_store.erl | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index fd3027e9..3d7411a9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1986,16 +1986,14 @@ transform_dir(BaseDir, Store, TransformFun) -> TmpDir = filename:join(Dir, ?TRANSFORM_TMP), TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of - true -> - throw({error, transform_failed_previously}); - false -> - OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(Dir, TmpDir, TransformFile, OldFileList), - for_each_file(Dir, fun file:delete/1, OldFileList), - NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), - for_each_file(TmpDir, fun file:delete/1, NewFileList), - ok = file:del_dir(TmpDir) + true -> throw({error, transform_failed_previously}); + false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + for_each_file(Dir, TmpDir, TransformFile, OldFileList), + for_each_file(Dir, fun file:delete/1, OldFileList), + NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), + for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), + for_each_file(TmpDir, fun file:delete/1, NewFileList), + ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> -- cgit v1.2.1 From c34c4592aaf59da9771a6a9a51de076d11da90a8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 16:46:39 +0000 Subject: Revert where rabbit:prepare happens. --- src/rabbit_prelaunch.erl | 2 ++ src/rabbit_upgrade.erl | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 612aec80..3283e8fd 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -235,6 +235,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. 
+process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> + [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; process_entry(Entry) -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c8d2ae87..73b9bb0e 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -46,7 +46,6 @@ %% ------------------------------------------------------------------- maybe_upgrade_mnesia() -> - rabbit:prepare(), Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of version_not_available -> -- cgit v1.2.1 From 6cae135624ca1ae276ec89066593fb11683021d5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 16:49:07 +0000 Subject: Rename --- src/rabbit_upgrade.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 73b9bb0e..da735b83 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -58,17 +58,17 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> - case am_i_upgrader(Nodes) of - true -> primary_upgrade(Upgrades, Nodes); - false -> non_primary_upgrade(Nodes) + case upgrade_mode(Nodes) of + primary -> primary_upgrade(Upgrades, Nodes); + secondary -> non_primary_upgrade(Nodes) end end. -am_i_upgrader(Nodes) -> +upgrade_mode(Nodes) -> case nodes_running(Nodes) of [] -> case am_i_disc_node() of - true -> true; + true -> primary; false -> die("Cluster upgrade needed but this is a ram " "node.~n Please start any of the disc nodes " "first.", []) @@ -85,7 +85,7 @@ am_i_upgrader(Nodes) -> ClusterVersion -> %% The other node(s) have upgraded already, I am not the %% upgrader - false; + secondary; MyVersion -> %% The other node(s) are running an unexpected version. die("Cluster upgrade needed but other nodes are " -- cgit v1.2.1 From b978524f5b06030cda66634a9e17cdca7dcb4fb7 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 17:08:08 +0000 Subject: Prose --- src/rabbit_upgrade.erl | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index da735b83..0fdb973b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -45,6 +45,47 @@ %% ------------------------------------------------------------------- +%% The upgrade logic is quite involved, due to the existence of +%% clusters. +%% +%% Firstly, we have two different types of upgrades to do: Mnesia and +%% everythinq else. Mnesia upgrades need to only be done by one node +%% in the cluster (we treat a non-clustered node as a single-node +%% cluster). This is the primary upgrader. The other upgrades need to +%% be done by all nodes. +%% +%% The primary upgrader has to start first (and do its Mnesia +%% upgrades). Secondary upgraders need to reset their Mnesia database +%% and then rejoin the cluster. They can't do the Mnesia upgrades as +%% well and then merge databases since the cookie for each table will +%% end up different and the merge will fail. +%% +%% This in turn means that we need to determine whether we are the +%% primary or secondary upgrader *before* Mnesia comes up. If we +%% didn't then the secondary upgrader would try to start Mnesia, and +%% either hang waiting for a node which is not yet up, or fail since +%% its schema differs from the other nodes in the cluster. 
+%% +%% Also, the primary upgrader needs to start Mnesia to do its +%% upgrades, but needs to forcibly load tables rather than wait for +%% them (in case it was not the last node to shut down, in which case +%% it would wait forever). +%% +%% This in turn means that maybe_upgrade_mnesia/0 has to be patched +%% into the boot process by prelaunch before the mnesia application is +%% started. By the time Mnesia is started the upgrades have happened +%% (on the primary), or Mnesia has been reset (on the secondary) and +%% rabbit_mnesia:init_db/2 can then make the node rejoin the clister +%% in the normal way. +%% +%% The non-mnesia upgrades are then triggered by +%% rabbit_mnesia:init_db/2. Of course, it's possible for a given +%% upgrade process to only require Mnesia upgrades, or only require +%% non-Mnesia upgrades. In the latter case no Mnesia resets and +%% reclusterings occur. + +%% ------------------------------------------------------------------- + maybe_upgrade_mnesia() -> Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of -- cgit v1.2.1 From ea73a62e8b8a86883fb8683d7f61a5693a519f46 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 17:58:58 +0000 Subject: (Untested) Record the nodes that were up when we shut down. --- src/rabbit.erl | 1 + src/rabbit_mnesia.erl | 36 +++++++++++++++++++++++++++++++++++- src/rabbit_upgrade.erl | 46 +++++++++++++++++++++++++++++++++++++--------- 3 files changed, 73 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1beed5c1..ffb6610d 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -203,6 +203,7 @@ start() -> end. stop() -> + rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e7da6a43..3f7fc0d8 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -21,7 +21,9 @@ cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, - create_cluster_nodes_config/1, read_cluster_nodes_config/0]). + create_cluster_nodes_config/1, read_cluster_nodes_config/0, + record_running_disc_nodes/0, read_previous_run_disc_nodes/0, + delete_previous_run_disc_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -57,6 +59,10 @@ -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). +-spec(record_running_disc_nodes/0 :: () -> 'ok'). +-spec(read_previous_run_disc_nodes/0 :: () -> [node()]). +-spec(delete_previous_run_disc_nodes/0 :: () -> 'ok'). +-spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -349,6 +355,34 @@ delete_cluster_nodes_config() -> FileName, Reason}}) end. +running_nodes_filename() -> + dir() ++ "/nodes_running_at_shutdown". + +record_running_disc_nodes() -> + FileName = running_nodes_filename(), + Nodes = rabbit_mnesia:nodes_of_type(disc_copies) -- [node()], + %% Don't check the result: we're shutting down anyway and this is + %% a best-effort-basis. + rabbit_misc:write_term_file(FileName, [Nodes]). + +read_previous_run_disc_nodes() -> + FileName = running_nodes_filename(), + case rabbit_misc:read_term_file(FileName) of + {ok, [Nodes]} -> Nodes; + {error, enoent} -> []; + {error, Reason} -> throw({error, {cannot_read_previous_nodes_file, + FileName, Reason}}) + end. 
+ +delete_previous_run_disc_nodes() -> + FileName = running_nodes_filename(), + case file:delete(FileName) of + ok -> ok; + {error, enoent} -> ok; + {error, Reason} -> throw({error, {cannot_delete_previous_nodes_file, + FileName, Reason}}) + end. + %% Take a cluster node config and create the right kind of node - a %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0fdb973b..23770686 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -49,8 +49,8 @@ %% clusters. %% %% Firstly, we have two different types of upgrades to do: Mnesia and -%% everythinq else. Mnesia upgrades need to only be done by one node -%% in the cluster (we treat a non-clustered node as a single-node +%% everythinq else. Mnesia upgrades must only be done by one node in +%% the cluster (we treat a non-clustered node as a single-node %% cluster). This is the primary upgrader. The other upgrades need to %% be done by all nodes. %% @@ -75,7 +75,7 @@ %% into the boot process by prelaunch before the mnesia application is %% started. By the time Mnesia is started the upgrades have happened %% (on the primary), or Mnesia has been reset (on the secondary) and -%% rabbit_mnesia:init_db/2 can then make the node rejoin the clister +%% rabbit_mnesia:init_db/2 can then make the node rejoin the cluster %% in the normal way. %% %% The non-mnesia upgrades are then triggered by @@ -83,6 +83,22 @@ %% upgrade process to only require Mnesia upgrades, or only require %% non-Mnesia upgrades. In the latter case no Mnesia resets and %% reclusterings occur. +%% +%% The primary upgrader needs to be a disc node. Ideally we would like +%% it to be the last disc node to shut down (since otherwise there's a +%% risk of data loss). On each node we therefore record the disc nodes +%% that were still running when we shut down. A disc node that knows +%% other nodes were up when it shut down, or a ram node, will refuse +%% to be the primary upgrader, and will thus not start when upgrades +%% are needed. +%% +%% However, this is racy if several nodes are shut down at once. Since +%% rabbit records the running nodes, and shuts down before mnesia, the +%% race manifests as all disc nodes thinking they are not the primary +%% upgrader. Therefore the user can remove the record of the last disc +%% node to shut down to get things going again. This may lose any +%% mnesia changes that happened after the node chosen as the primary +%% upgrader was shut down. %% ------------------------------------------------------------------- @@ -103,16 +119,28 @@ maybe_upgrade_mnesia() -> primary -> primary_upgrade(Upgrades, Nodes); secondary -> non_primary_upgrade(Nodes) end - end. + end, + ok = rabbit_mnesia:delete_previous_run_disc_nodes(). upgrade_mode(Nodes) -> case nodes_running(Nodes) of [] -> - case am_i_disc_node() of - true -> primary; - false -> die("Cluster upgrade needed but this is a ram " - "node.~n Please start any of the disc nodes " - "first.", []) + AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), + case {am_i_disc_node(), AfterUs} of + {true, []} -> + primary; + {true, _} -> + Filename = rabbit_mnesia:running_nodes_filename(), + die("Cluster upgrade needed but other disc nodes shut " + "down after this one.~n Please start one of the " + "disc nodes: ~p first.~n~n Note: if several disc " + "nodes were shut down simultaneously they may all " + "show this message. 
In which case, remove ~s on one " + "of them and start that.", [AfterUs, Filename]); + {false, _} -> + die("Cluster upgrade needed but this is a ram " + "node.~n Please start one of the disc nodes: " + "~p first.", [AfterUs]) end; [Another|_] -> ClusterVersion = -- cgit v1.2.1 From b30f89113d57a303c52739712408440d75605532 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:11:45 +0000 Subject: Oops, that's not exported. --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 3f7fc0d8..8acb0b02 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -360,7 +360,7 @@ running_nodes_filename() -> record_running_disc_nodes() -> FileName = running_nodes_filename(), - Nodes = rabbit_mnesia:nodes_of_type(disc_copies) -- [node()], + Nodes = nodes_of_type(disc_copies) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]). -- cgit v1.2.1 From 11a1cc6eac6bd4cde6bd971763348d0384ec2520 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:39:45 +0000 Subject: Fix our idea of which nodes were running when we shut down. --- src/rabbit_mnesia.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8acb0b02..367eb6f8 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -360,7 +360,10 @@ running_nodes_filename() -> record_running_disc_nodes() -> FileName = running_nodes_filename(), - Nodes = nodes_of_type(disc_copies) -- [node()], + Nodes = sets:to_list( + sets:intersection( + sets:from_list(nodes_of_type(disc_copies)), + sets:from_list(running_clustered_nodes()))) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]). -- cgit v1.2.1 From a20039bacdf9f9ca06b82e7673a6a423318fb269 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:45:21 +0000 Subject: Make the error messages more readable. --- src/rabbit_upgrade.erl | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 23770686..0c2e4bce 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -109,7 +109,7 @@ maybe_upgrade_mnesia() -> case Nodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " - "< 2.1.1.~n Unfortunately you will need to " + "< 2.1.1.~nUnfortunately you will need to " "rebuild the cluster.", []) end; [] -> @@ -132,15 +132,19 @@ upgrade_mode(Nodes) -> {true, _} -> Filename = rabbit_mnesia:running_nodes_filename(), die("Cluster upgrade needed but other disc nodes shut " - "down after this one.~n Please start one of the " - "disc nodes: ~p first.~n~n Note: if several disc " - "nodes were shut down simultaneously they may all " - "show this message. In which case, remove ~s on one " - "of them and start that.", [AfterUs, Filename]); + "down after this one.~nPlease first start the last " + "disc node to shut down.~nThe disc nodes that were " + "still running when this one shut down are:~n~n" + " ~p~n~nNote: if several disc nodes were shut down " + "simultaneously they may all~nshow this message. " + "In which case, remove the lock file on one of them " + "and~nstart that node. 
The lock file on this node " + "is:~n~n ~s ", + [AfterUs, Filename]); {false, _} -> - die("Cluster upgrade needed but this is a ram " - "node.~n Please start one of the disc nodes: " - "~p first.", [AfterUs]) + die("Cluster upgrade needed but this is a ram node.~n" + "Please first start the last disc node to shut down.", + []) end; [Another|_] -> ClusterVersion = @@ -176,7 +180,7 @@ die(Msg, Args) -> %% straight out into do_boot, generating an erl_crash.dump %% and displaying any error message in a confusing way. error_logger:error_msg(Msg, Args), - io:format("~n~n** " ++ Msg ++ " **~n~n~n", Args), + io:format("~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args), error_logger:logfile(close), halt(1). -- cgit v1.2.1 From f673f3919cad23798116ca2f63de64a5b36b03b4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:58:18 +0000 Subject: Retain ram-nodeness when upgrading. --- src/rabbit_upgrade.erl | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0c2e4bce..56dab3e9 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -103,10 +103,11 @@ %% ------------------------------------------------------------------- maybe_upgrade_mnesia() -> - Nodes = rabbit_mnesia:all_clustered_nodes(), + AllNodes = rabbit_mnesia:all_clustered_nodes(), + KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> - case Nodes of + case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " "< 2.1.1.~nUnfortunately you will need to " @@ -115,18 +116,18 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> - case upgrade_mode(Nodes) of - primary -> primary_upgrade(Upgrades, Nodes); - secondary -> non_primary_upgrade(Nodes) + case upgrade_mode(AllNodes, KnownDiscNodes) of + primary -> primary_upgrade(Upgrades, AllNodes); + secondary -> secondary_upgrade(KnownDiscNodes) end end, ok = rabbit_mnesia:delete_previous_run_disc_nodes(). -upgrade_mode(Nodes) -> - case nodes_running(Nodes) of +upgrade_mode(AllNodes, KnownDiscNodes) -> + case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(), AfterUs} of + case {am_i_disc_node(KnownDiscNodes), AfterUs} of {true, []} -> primary; {true, _} -> @@ -167,10 +168,10 @@ upgrade_mode(Nodes) -> end end. -am_i_disc_node() -> +am_i_disc_node(KnownDiscNodes) -> %% The cluster config does not list all disc nodes, but it will list us %% if we're one. - case rabbit_mnesia:read_cluster_nodes_config() of + case KnownDiscNodes of [] -> true; DiscNodes -> lists:member(node(), DiscNodes) end. @@ -204,10 +205,10 @@ primary_upgrade(Upgrades, Nodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -non_primary_upgrade(Nodes) -> +secondary_upgrade(KnownDiscNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(Nodes), + ok = rabbit_mnesia:create_cluster_nodes_config(KnownDiscNodes), write_version(mnesia), ok. 
-- cgit v1.2.1 From 2bf5a24342c350511ec5ec9de6a1f1c1e8496e64 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 12:01:49 +0000 Subject: Implement try-restart and condrestart as intended --- packaging/common/rabbitmq-server.init | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index c1647dc5..93c9e0e7 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -101,6 +101,16 @@ rotate_logs_rabbitmq() { set -e } +restart_running_rabbitmq () { + status_rabbitmq quiet + if [ $RETVAL = 0 ] ; then + restart_rabbitmq + else + echo RabbitMQ is not runnning + RETVAL=0 + fi +} + restart_rabbitmq() { stop_rabbitmq start_rabbitmq @@ -124,11 +134,16 @@ case "$1" in echo -n "Rotating log files for $DESC: " rotate_logs_rabbitmq ;; - force-reload|reload|restart|condrestart|try-restart) + force-reload|reload|restart) echo -n "Restarting $DESC: " restart_rabbitmq echo "$NAME." ;; + condrestart|try-restart) + echo -n "Restarting $DESC: " + restart_running_rabbitmq + echo "$NAME." + ;; *) echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2 RETVAL=1 -- cgit v1.2.1 From e5f82cb1200cb97b097306ab35d26250afdda777 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 13:33:43 +0000 Subject: Remove condrestart target --- packaging/common/rabbitmq-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 93c9e0e7..916dee6f 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -139,7 +139,7 @@ case "$1" in restart_rabbitmq echo "$NAME." ;; - condrestart|try-restart) + try-restart) echo -n "Restarting $DESC: " restart_running_rabbitmq echo "$NAME." -- cgit v1.2.1 From 1bafead0212d17e41198121a83ed44ea1bd506b8 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:01:03 +0000 Subject: Maybe monitor queues on consume, maybe unmonitor on cancel --- src/rabbit_channel.erl | 65 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index a6790b6c..346ec371 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -33,9 +33,9 @@ start_limiter_fun, transaction_id, tx_participants, next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, - consumer_mapping, blocking, queue_collector_pid, stats_timer, - confirm_enabled, publish_seqno, unconfirmed, confirmed, - capabilities}). + consumer_mapping, blocking, consumer_monitors, queue_collector_pid, + stats_timer, confirm_enabled, publish_seqno, unconfirmed, + confirmed, capabilities}). -define(MAX_PERMISSION_CACHE_SIZE, 12). 
@@ -171,6 +171,7 @@ init([Channel, ReaderPid, WriterPid, User, VHost, Capabilities, CollectorPid, most_recently_declared_queue = <<>>, consumer_mapping = dict:new(), blocking = dict:new(), + consumer_monitors = dict:new(), queue_collector_pid = CollectorPid, stats_timer = StatsTimer, confirm_enabled = false, @@ -646,9 +647,11 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid, - limiter_pid = LimiterPid, - consumer_mapping = ConsumerMapping }) -> + _, State = #ch{reader_pid = ReaderPid, + limiter_pid = LimiterPid, + consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + capabilities = Capabilities}) -> case dict:find(ConsumerTag, ConsumerMapping) of error -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), @@ -665,18 +668,31 @@ handle_method(#'basic.consume'{queue = QueueNameBin, case rabbit_amqqueue:with_exclusive_access_or_die( QueueName, ReaderPid, fun (Q) -> - rabbit_amqqueue:basic_consume( - Q, NoAck, self(), LimiterPid, - ActualConsumerTag, ExclusiveConsume, - ok_msg(NoWait, #'basic.consume_ok'{ - consumer_tag = ActualConsumerTag})) + {rabbit_amqqueue:basic_consume( + Q, NoAck, self(), LimiterPid, + ActualConsumerTag, ExclusiveConsume, + ok_msg(NoWait, #'basic.consume_ok'{ + consumer_tag = ActualConsumerTag})), + Q#amqqueue.pid} end) of - ok -> + {ok, QPid} -> + {ConsumerMonitors1, MRef} = + case rabbit_misc:table_lookup( + Capabilities, + <<"consumer_death_notification">>) of + {bool, true} -> + MRef1 = erlang:monitor(process, QPid), + {dict:store(MRef1, ActualConsumerTag, + ConsumerMonitors), MRef1}; + _ -> + {ConsumerMonitors, undefined} + end, {noreply, State#ch{consumer_mapping = dict:store(ActualConsumerTag, - QueueName, - ConsumerMapping)}}; - {error, exclusive_consume_unavailable} -> + {QueueName, MRef}, + ConsumerMapping), + consumer_monitors = ConsumerMonitors1}}; + {{error, exclusive_consume_unavailable}, _QPid} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", [rabbit_misc:rs(QueueName)]) @@ -689,16 +705,23 @@ handle_method(#'basic.consume'{queue = QueueNameBin, handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait}, - _, State = #ch{consumer_mapping = ConsumerMapping }) -> + _, State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors}) -> OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag}, case dict:find(ConsumerTag, ConsumerMapping) of error -> %% Spec requires we ignore this situation. 
return_ok(State, NoWait, OkMsg); - {ok, QueueName} -> - NewState = State#ch{consumer_mapping = - dict:erase(ConsumerTag, - ConsumerMapping)}, + {ok, {QueueName, MRef}} -> + ConsumerMonitors1 = + case MRef of + undefined -> ConsumerMonitors; + _ -> true = erlang:demonitor(MRef), + dict:erase(MRef, ConsumerMonitors) + end, + NewState = State#ch{consumer_mapping = dict:erase(ConsumerTag, + ConsumerMapping), + consumer_monitors = ConsumerMonitors1}, case rabbit_amqqueue:with( QueueName, fun (Q) -> @@ -1208,7 +1231,7 @@ limit_queues(LPid, #ch{consumer_mapping = Consumers}) -> consumer_queues(Consumers) -> [QPid || QueueName <- sets:to_list( - dict:fold(fun (_ConsumerTag, QueueName, S) -> + dict:fold(fun (_ConsumerTag, {QueueName, _MRef}, S) -> sets:add_element(QueueName, S) end, sets:new(), Consumers)), case rabbit_amqqueue:lookup(QueueName) of -- cgit v1.2.1 From eec95bf77fd15eb16e44e9938e3cd1c857105ec2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:20:01 +0000 Subject: Tidy up on DOWN and emit basic.cancel --- src/rabbit_channel.erl | 46 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 346ec371..1da8c959 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -281,20 +281,15 @@ handle_cast({confirm, MsgSeqNos, From}, State) -> handle_info(timeout, State) -> noreply(State); -handle_info({'DOWN', _MRef, process, QPid, Reason}, - State = #ch{unconfirmed = UC}) -> - %% TODO: this does a complete scan and partial rebuild of the - %% tree, which is quite efficient. To do better we'd need to - %% maintain a secondary mapping, from QPids to MsgSeqNos. - {MXs, UC1} = remove_queue_unconfirmed( - gb_trees:next(gb_trees:iterator(UC)), QPid, - {[], UC}, State), - erase_queue_stats(QPid), - State1 = case Reason of - normal -> record_confirms(MXs, State#ch{unconfirmed = UC1}); - _ -> send_nacks(MXs, State#ch{unconfirmed = UC1}) - end, - noreply(queue_blocked(QPid, State1)). +handle_info({'DOWN', MRef, process, QPid, Reason}, + State = #ch{consumer_monitors = ConsumerMonitors}) -> + noreply( + case dict:find(MRef, ConsumerMonitors) of + error -> + handle_non_consumer_down(QPid, Reason, State); + {ok, ConsumerTag} -> + handle_consumer_down(MRef, ConsumerTag, State) + end). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> ok = clear_permission_cache(), @@ -1061,6 +1056,29 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- +handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed = UC}) -> + %% TODO: this does a complete scan and partial rebuild of the + %% tree, which is quite efficient. To do better we'd need to + %% maintain a secondary mapping, from QPids to MsgSeqNos. + {MXs, UC1} = remove_queue_unconfirmed( + gb_trees:next(gb_trees:iterator(UC)), QPid, + {[], UC}, State), + erase_queue_stats(QPid), + State1 = case Reason of + normal -> record_confirms(MXs, State#ch{unconfirmed = UC1}); + _ -> send_nacks(MXs, State#ch{unconfirmed = UC1}) + end, + queue_blocked(QPid, State1). + +handle_consumer_down(MRef, ConsumerTag, + State = #ch{consumer_monitors = ConsumerMonitors, + writer_pid = WriterPid}) -> + ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), + Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, + nowait = true}, + ok = rabbit_writer:send_command(WriterPid, Cancel), + State#ch{consumer_monitors = ConsumerMonitors1}. 
+ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, -- cgit v1.2.1 From 58947e087dc17246a3a973b7a912321ffe3e0804 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:32:55 +0000 Subject: Add to our own server_properties (informational only) --- src/rabbit_reader.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index e9ff97f9..be5a90af 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -249,9 +249,10 @@ server_properties(Protocol) -> NormalizedConfigServerProps). server_capabilities(rabbit_framing_amqp_0_9_1) -> - [{<<"publisher_confirms">>, bool, true}, - {<<"exchange_exchange_bindings">>, bool, true}, - {<<"basic.nack">>, bool, true}]; + [{<<"publisher_confirms">>, bool, true}, + {<<"exchange_exchange_bindings">>, bool, true}, + {<<"basic.nack">>, bool, true}, + {<<"consumer_death_notification">>, bool, true}]; server_capabilities(_) -> []. -- cgit v1.2.1 From 78dad489959a02769a7452ea9b6e604de9dfeec6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Feb 2011 12:32:12 +0000 Subject: Changes by ryandesign in r75667 of Macports' SVN. SVN commit msg was: rabbitmq-server: maintainer update to 2.3.1; see #28254 Additionally: 'I simplified how the manpages were installed (using "`xinstall -W`" to install multiple files to the same destination directory at once), and simplified how the checksums were specified (listing each filename only once).' --- packaging/macports/Portfile.in | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 862a0d1a..67ebcf78 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -23,12 +23,14 @@ distfiles ${name}-${version}${extract.suffix} \ ${name}-generic-unix-${version}${extract.suffix} checksums \ - ${name}-${version}${extract.suffix} md5 @md5-src@ \ - ${name}-${version}${extract.suffix} sha1 @sha1-src@ \ - ${name}-${version}${extract.suffix} rmd160 @rmd160-src@ \ - ${name}-generic-unix-${version}${extract.suffix} md5 @md5-bin@ \ - ${name}-generic-unix-${version}${extract.suffix} sha1 @sha1-bin@ \ - ${name}-generic-unix-${version}${extract.suffix} rmd160 @rmd160-bin@ + ${name}-${version}${extract.suffix} \ + md5 @md5-src@ \ + sha1 @sha1-src@ \ + rmd160 @rmd160-src@ \ + ${name}-generic-unix-${version}${extract.suffix} \ + md5 @md5-bin@ \ + sha1 @sha1-bin@ \ + rmd160 @rmd160-bin@ depends_lib port:erlang depends_build port:libxslt @@ -102,10 +104,8 @@ post-destroot { file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmq-server file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmqctl - file copy ${mansrc}/man1/rabbitmq-multi.1.gz ${mandest}/man1/ - file copy ${mansrc}/man1/rabbitmq-server.1.gz ${mandest}/man1/ - file copy ${mansrc}/man1/rabbitmqctl.1.gz ${mandest}/man1/ - file copy ${mansrc}/man5/rabbitmq-env.conf.5.gz ${mandest}/man5/ + xinstall -m 644 -W ${mansrc}/man1 rabbitmq-multi.1.gz rabbitmq-server.1.gz rabbitmqctl.1.gz ${mandest}/man1/ + xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ } pre-install { -- cgit v1.2.1 From d8c2900d40317202aa509ef18116c7058ddc7f16 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 17:42:56 +0000 Subject: Make the consumer mapping store the queues, not the queuenames --- src/rabbit_channel.erl | 47 
+++++++++++++++++------------------------------ 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 2fc19256..ff8ff800 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -680,9 +680,9 @@ handle_method(#'basic.consume'{queue = QueueNameBin, ActualConsumerTag, ExclusiveConsume, ok_msg(NoWait, #'basic.consume_ok'{ consumer_tag = ActualConsumerTag})), - Q#amqqueue.pid} + Q} end) of - {ok, QPid} -> + {ok, Q = #amqqueue{pid = QPid}} -> {ConsumerMonitors1, MRef} = case rabbit_misc:table_lookup( Capabilities, @@ -696,10 +696,10 @@ handle_method(#'basic.consume'{queue = QueueNameBin, end, {noreply, State#ch{consumer_mapping = dict:store(ActualConsumerTag, - {QueueName, MRef}, + {Q, MRef}, ConsumerMapping), consumer_monitors = ConsumerMonitors1}}; - {{error, exclusive_consume_unavailable}, _QPid} -> + {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", [rabbit_misc:rs(QueueName)]) @@ -719,7 +719,7 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, error -> %% Spec requires we ignore this situation. return_ok(State, NoWait, OkMsg); - {ok, {QueueName, MRef}} -> + {ok, {Q, MRef}} -> ConsumerMonitors1 = case MRef of undefined -> ConsumerMonitors; @@ -729,21 +729,15 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, NewState = State#ch{consumer_mapping = dict:erase(ConsumerTag, ConsumerMapping), consumer_monitors = ConsumerMonitors1}, - case rabbit_amqqueue:with( - QueueName, - fun (Q) -> - %% In order to ensure that no more messages - %% are sent to the consumer after the - %% cancel_ok has been sent, we get the - %% queue process to send the cancel_ok on - %% our behalf. If we were sending the - %% cancel_ok ourselves it might overtake a - %% message sent previously by the queue. - rabbit_amqqueue:basic_cancel( - Q, self(), ConsumerTag, - ok_msg(NoWait, #'basic.cancel_ok'{ - consumer_tag = ConsumerTag})) - end) of + %% In order to ensure that no more messages are sent to + %% the consumer after the cancel_ok has been sent, we get + %% the queue process to send the cancel_ok on our + %% behalf. If we were sending the cancel_ok ourselves it + %% might overtake a message sent previously by the queue. + case rabbit_amqqueue:basic_cancel( + Q, self(), ConsumerTag, + ok_msg(NoWait, #'basic.cancel_ok'{ + consumer_tag = ConsumerTag})) of ok -> {noreply, NewState}; {error, not_found} -> @@ -1262,16 +1256,9 @@ limit_queues(LPid, #ch{consumer_mapping = Consumers}) -> rabbit_amqqueue:limit_all(consumer_queues(Consumers), self(), LPid). consumer_queues(Consumers) -> - [QPid || QueueName <- - sets:to_list( - dict:fold(fun (_ConsumerTag, {QueueName, _MRef}, S) -> - sets:add_element(QueueName, S) - end, sets:new(), Consumers)), - case rabbit_amqqueue:lookup(QueueName) of - {ok, Q} -> QPid = Q#amqqueue.pid, true; - %% queue has been deleted in the meantime - {error, not_found} -> QPid = none, false - end]. + lists:usort([QPid || + {_Key, {#amqqueue{pid = QPid}, _MRef}} + <- dict:to_list(Consumers)]). 
%% tell the limiter about the number of acks that have been received %% for messages delivered to subscribed consumers, but not acks for -- cgit v1.2.1 From 0a841c886b0941b534de7f5fb32405e910c44173 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 18:28:16 +0000 Subject: Set up the monitor only when we see the basic.consume_ok coming back through otherwise there's a risk of sending out the cancel before we've sent out the consume_ok, which would surprise clients. This is further complicated by the fact NoWait with basic.consume... --- src/rabbit_channel.erl | 74 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 26 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ff8ff800..b8788983 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -241,6 +241,11 @@ handle_cast({flushed, QPid}, State) -> handle_cast(terminate, State) -> {stop, normal, State}; +handle_cast({command, #'basic.consume_ok'{consumer_tag = ConsumerTag} = Msg}, + State = #ch{writer_pid = WriterPid}) -> + ok = rabbit_writer:send_command(WriterPid, Msg), + noreply(monitor_consumer(ConsumerTag, State)); + handle_cast({command, Msg}, State = #ch{writer_pid = WriterPid}) -> ok = rabbit_writer:send_command(WriterPid, Msg), noreply(State); @@ -656,9 +661,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, nowait = NoWait}, _, State = #ch{reader_pid = ReaderPid, limiter_pid = LimiterPid, - consumer_mapping = ConsumerMapping, - consumer_monitors = ConsumerMonitors, - capabilities = Capabilities}) -> + consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of error -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), @@ -682,23 +685,13 @@ handle_method(#'basic.consume'{queue = QueueNameBin, consumer_tag = ActualConsumerTag})), Q} end) of - {ok, Q = #amqqueue{pid = QPid}} -> - {ConsumerMonitors1, MRef} = - case rabbit_misc:table_lookup( - Capabilities, - <<"consumer_death_notification">>) of - {bool, true} -> - MRef1 = erlang:monitor(process, QPid), - {dict:store(MRef1, ActualConsumerTag, - ConsumerMonitors), MRef1}; - _ -> - {ConsumerMonitors, undefined} - end, - {noreply, State#ch{consumer_mapping = - dict:store(ActualConsumerTag, - {Q, MRef}, - ConsumerMapping), - consumer_monitors = ConsumerMonitors1}}; + {ok, Q} -> + State1 = State#ch{consumer_mapping = + dict:store(ActualConsumerTag, + {Q, undefined}, + ConsumerMapping)}, + {noreply, + maybe_monitor_consumer(NoWait, ActualConsumerTag, State1)}; {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -734,10 +727,14 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, %% the queue process to send the cancel_ok on our %% behalf. If we were sending the cancel_ok ourselves it %% might overtake a message sent previously by the queue. 
- case rabbit_amqqueue:basic_cancel( - Q, self(), ConsumerTag, - ok_msg(NoWait, #'basic.cancel_ok'{ - consumer_tag = ConsumerTag})) of + case rabbit_misc:with_exit_handler( + fun () -> {error, not_found} end, + fun () -> + rabbit_amqqueue:basic_cancel( + Q, self(), ConsumerTag, + ok_msg(NoWait, #'basic.cancel_ok'{ + consumer_tag = ConsumerTag})) + end) of ok -> {noreply, NewState}; {error, not_found} -> @@ -1062,6 +1059,28 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- +maybe_monitor_consumer(true, ConsumerTag, State) -> + monitor_consumer(ConsumerTag, State); +maybe_monitor_consumer(false, _ConsumerTag, State) -> + State. + +monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + capabilities = Capabilities}) -> + case {dict:find(ConsumerTag, ConsumerMapping), + rabbit_misc:table_lookup( + Capabilities, <<"consumer_death_notification">>)} of + {{ok, {#amqqueue{pid = QPid} = Q, undefined}}, {bool, true}} -> + MRef = erlang:monitor(process, QPid), + State#ch{consumer_mapping = + dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), + consumer_monitors = + dict:store(MRef, ConsumerTag, ConsumerMonitors)}; + _X -> + %% either already received the cancel or incapable client + State + end. + handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); @@ -1080,13 +1099,16 @@ handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> queue_blocked(QPid, State3). handle_consumer_down(MRef, ConsumerTag, - State = #ch{consumer_monitors = ConsumerMonitors, + State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, writer_pid = WriterPid}) -> + ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping), ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, nowait = true}, ok = rabbit_writer:send_command(WriterPid, Cancel), - State#ch{consumer_monitors = ConsumerMonitors1}. + State#ch{consumer_mapping = ConsumerMapping1, + consumer_monitors = ConsumerMonitors1}. binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, -- cgit v1.2.1 From b4c3bf3e2b677325d05b0cce115214f79aa362b9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 18:40:07 +0000 Subject: Fatal error --- src/rabbit_channel.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b8788983..ec3088dd 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1076,7 +1076,7 @@ monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), consumer_monitors = dict:store(MRef, ConsumerTag, ConsumerMonitors)}; - _X -> + _ -> %% either already received the cancel or incapable client State end. -- cgit v1.2.1 From 4584e37187aba68d53597605b71858350c2e6efa Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:34:39 +0000 Subject: Include LSB Default-Start and Default-Stop lines in init script Runlevel uses vary between distros; the runlevels given here correspond to the most common Linux conventions. In particular, runlevel 2 is in the stop list, because in most distros (Debian being a notable exception) it means that network services should not be started. 
--- packaging/common/rabbitmq-server.init | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 39d23983..5a43be5d 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -10,8 +10,8 @@ # Provides: rabbitmq-server # Required-Start: $remote_fs $network # Required-Stop: $remote_fs $network -# Default-Start: -# Default-Stop: +# Default-Start: 3 4 5 +# Default-Stop: 0 1 2 6 # Description: RabbitMQ broker # Short-Description: Enable AMQP service provided by RabbitMQ broker ### END INIT INFO -- cgit v1.2.1 From e6b9eb1b1599c01ec553f7a1910834b7ee4fa0da Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:36:36 +0000 Subject: Debian needs unusual LSB Default-Start and Default-Stop lines For Debian and descendants, there is no distinction between runlevel 2 and 3, 4, and 5. So we need to modify the Default-Start and Default-Stop lines in the init script accordingly. --- packaging/debs/Debian/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile index ab05f732..221dbe4f 100644 --- a/packaging/debs/Debian/Makefile +++ b/packaging/debs/Debian/Makefile @@ -22,9 +22,13 @@ package: clean tar -zxvf $(DEBIAN_ORIG_TARBALL) cp -r debian $(UNPACKED_DIR) cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/ +# Debian and descendants differ from most other distros in that +# runlevel 2 should start network services. sed -i \ -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/default/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=|' \ + -e 's|^\(# Default-Start:\).*$$|\1 2 3 4 5|' \ + -e 's|^\(# Default-Stop:\).*$$|\1 0 1 6|' \ $(UNPACKED_DIR)/debian/rabbitmq-server.init sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ $(UNPACKED_DIR)/debian/rabbitmq-script-wrapper -- cgit v1.2.1 From 67f728639d1a0923cfab0952e437d5855a062a71 Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:37:54 +0000 Subject: Obey Fedora guidelines regarding Default-Start and Default-Stop Fedora guidelines say that non-vital packages should omit Default-Start and Default-Stop. 
--- packaging/RPMS/Fedora/Makefile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packaging/RPMS/Fedora/Makefile b/packaging/RPMS/Fedora/Makefile index 74a1800a..2c0f9a6c 100644 --- a/packaging/RPMS/Fedora/Makefile +++ b/packaging/RPMS/Fedora/Makefile @@ -12,7 +12,7 @@ ifndef RPM_OS RPM_OS=fedora endif -ifeq "x$(RPM_OS)" "xsuse" +ifeq "$(RPM_OS)" "suse" REQUIRES=/sbin/chkconfig /sbin/service OS_DEFINES=--define '_initrddir /etc/init.d' --define 'dist .suse' else @@ -34,6 +34,11 @@ prepare: -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/sysconfig/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=/var/lock/subsys/$$NAME|' \ SOURCES/rabbitmq-server.init +ifeq "$(RPM_OS)" "fedora" +# Fedora says that only vital services should have Default-Start + sed -i -e '/^# Default-Start:/d;/^# Default-Stop:/d' \ + SOURCES/rabbitmq-server.init +endif sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ SOURCES/rabbitmq-script-wrapper cp rabbitmq-server.logrotate SOURCES/rabbitmq-server.logrotate @@ -41,5 +46,5 @@ prepare: server: prepare rpmbuild -ba --nodeps SPECS/rabbitmq-server.spec $(DEFINES) $(OS_DEFINES) -clean: +clean: rm -rf SOURCES SPECS RPMS SRPMS BUILD tmp -- cgit v1.2.1 From dfb0d30bde61730a5b6ddd5a51a98a39d447cba6 Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sun, 20 Feb 2011 00:11:18 +0000 Subject: Fix "File listed twice" warnings when building RPMs --- packaging/RPMS/Fedora/rabbitmq-server.spec | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 5d573bde..79c9607c 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -65,12 +65,8 @@ mkdir -p %{buildroot}%{_sysconfdir}/rabbitmq rm %{_maindir}/LICENSE %{_maindir}/LICENSE-MPL-RabbitMQ %{_maindir}/INSTALL #Build the list of files -rm -f %{_builddir}/%{name}.files -echo '%defattr(-,root,root, -)' >> %{_builddir}/%{name}.files -(cd %{buildroot}; \ - find . -type f ! -regex '\.%{_sysconfdir}.*' \ - ! -regex '\.\(%{_rabbit_erllibdir}\|%{_rabbit_libdir}\).*' \ - | sed -e 's/^\.//' >> %{_builddir}/%{name}.files) +echo '%defattr(-,root,root, -)' >%{_builddir}/%{name}.files +find %{buildroot} -path %{buildroot}%{_sysconfdir} -prune -o '!' -type d -printf "/%%P\n" >>%{_builddir}/%{name}.files %pre @@ -117,8 +113,6 @@ done %attr(0750, rabbitmq, rabbitmq) %dir %{_localstatedir}/lib/rabbitmq %attr(0750, rabbitmq, rabbitmq) %dir %{_localstatedir}/log/rabbitmq %dir %{_sysconfdir}/rabbitmq -%{_rabbit_erllibdir} -%{_rabbit_libdir} %{_initrddir}/rabbitmq-server %config(noreplace) %{_sysconfdir}/logrotate.d/rabbitmq-server %doc LICENSE LICENSE-MPL-RabbitMQ -- cgit v1.2.1 From be8cb807748f28021d38c62f158f095874d9d607 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Feb 2011 17:36:09 +0000 Subject: renames and refactors --- src/rabbit_channel.erl | 24 +++++++++++------------- src/rabbit_reader.erl | 8 ++++---- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index abda1c1f..28f3673d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -299,7 +299,7 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, noreply( case dict:find(MRef, ConsumerMonitors) of error -> - handle_non_consumer_down(QPid, Reason, State); + handle_queue_down(QPid, Reason, State); {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) end). 
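The 'DOWN' dispatch in the hunk above works because the channel keeps the same monitor reference in two dictionaries: consumer_mapping maps a consumer tag to {Queue, MRef}, while consumer_monitors maps the MRef back to the tag. A minimal sketch of that double-indexed lookup, using plain dicts rather than the real #ch{} record (the module and function names below are illustrative, not from the patch):

%% Sketch only: index a monitor under the consumer tag and the tag under
%% the monitor, so a single dict lookup classifies any 'DOWN' message.
-module(monitor_index_sketch).
-export([add/3, classify/2]).

add(Tag, QPid, {Mapping, Monitors}) ->
    MRef = erlang:monitor(process, QPid),
    {dict:store(Tag, {QPid, MRef}, Mapping),
     dict:store(MRef, Tag, Monitors)}.

classify(MRef, {_Mapping, Monitors}) ->
    case dict:find(MRef, Monitors) of
        error     -> queue_down;          %% some other monitored process
        {ok, Tag} -> {consumer_down, Tag} %% cancel this consumer
    end.
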
@@ -717,7 +717,10 @@ handle_method(#'basic.consume'{queue = QueueNameBin, {Q, undefined}, ConsumerMapping)}, {noreply, - maybe_monitor_consumer(NoWait, ActualConsumerTag, State1)}; + case NoWait of + true -> monitor_consumer(ActualConsumerTag, State1); + false -> State1 + end}; {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -1085,29 +1088,24 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- -maybe_monitor_consumer(true, ConsumerTag, State) -> - monitor_consumer(ConsumerTag, State); -maybe_monitor_consumer(false, _ConsumerTag, State) -> - State. - monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, consumer_monitors = ConsumerMonitors, capabilities = Capabilities}) -> - case {dict:find(ConsumerTag, ConsumerMapping), - rabbit_misc:table_lookup( - Capabilities, <<"consumer_death_notification">>)} of - {{ok, {#amqqueue{pid = QPid} = Q, undefined}}, {bool, true}} -> + {#amqqueue{pid = QPid} = Q, undefined} = dict:fetch(ConsumerTag, + ConsumerMapping), + case rabbit_misc:table_lookup( + Capabilities, <<"consumer_cancel_notify">>) of + {bool, true} -> MRef = erlang:monitor(process, QPid), State#ch{consumer_mapping = dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), consumer_monitors = dict:store(MRef, ConsumerTag, ConsumerMonitors)}; _ -> - %% either already received the cancel or incapable client State end. -handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> +handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); none -> [] diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index c5d6ecc4..aa7d2775 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -163,10 +163,10 @@ server_properties(Protocol) -> NormalizedConfigServerProps). server_capabilities(rabbit_framing_amqp_0_9_1) -> - [{<<"publisher_confirms">>, bool, true}, - {<<"exchange_exchange_bindings">>, bool, true}, - {<<"basic.nack">>, bool, true}, - {<<"consumer_death_notification">>, bool, true}]; + [{<<"publisher_confirms">>, bool, true}, + {<<"exchange_exchange_bindings">>, bool, true}, + {<<"basic.nack">>, bool, true}, + {<<"consumer_cancel_notify">>, bool, true}]; server_capabilities(_) -> []. -- cgit v1.2.1 From bb9d2725ae586f40a6868b6af7ba4980e2ae3725 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Feb 2011 18:15:44 +0000 Subject: cosmetic and code movement --- src/rabbit_channel.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 28f3673d..fe6522fe 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,10 +298,8 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, State = #ch{consumer_monitors = ConsumerMonitors}) -> noreply( case dict:find(MRef, ConsumerMonitors) of - error -> - handle_queue_down(QPid, Reason, State); - {ok, ConsumerTag} -> - handle_consumer_down(MRef, ConsumerTag, State) + error -> handle_queue_down(QPid, Reason, State); + {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) end). 
handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -1091,11 +1089,11 @@ handle_method(_MethodRecord, _Content, _State) -> monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, consumer_monitors = ConsumerMonitors, capabilities = Capabilities}) -> - {#amqqueue{pid = QPid} = Q, undefined} = dict:fetch(ConsumerTag, - ConsumerMapping), case rabbit_misc:table_lookup( Capabilities, <<"consumer_cancel_notify">>) of {bool, true} -> + {#amqqueue{pid = QPid} = Q, undefined} = + dict:fetch(ConsumerTag, ConsumerMapping), MRef = erlang:monitor(process, QPid), State#ch{consumer_mapping = dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), -- cgit v1.2.1 From a25d080a27495b7306a282086e3e2c1ccb7d86be Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 11:15:28 +0000 Subject: Make sure logging is working if we're about to actually do something. --- src/rabbit_upgrade.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9f33fd03..dd19de19 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -107,6 +107,7 @@ maybe_upgrade_mnesia() -> KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> + rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " @@ -116,6 +117,7 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> + rabbit:prepare(), %% Ensure we have logs for this case upgrade_mode(AllNodes, KnownDiscNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(KnownDiscNodes) -- cgit v1.2.1 From bd6e51846b5fbbb6d407f0f1482b054563e1cecc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 12:50:49 +0000 Subject: Don't look at the cluster config, it is not trustworthy (for what we want). --- src/rabbit_mnesia.erl | 2 +- src/rabbit_upgrade.erl | 33 +++++++++++++++++++-------------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 97c4d11e..68654e46 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -18,7 +18,7 @@ -module(rabbit_mnesia). -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, - cluster/1, force_cluster/1, reset/0, force_reset/0, + cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/2, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dd19de19..f1f0d6d3 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -104,7 +104,6 @@ maybe_upgrade_mnesia() -> AllNodes = rabbit_mnesia:all_clustered_nodes(), - KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> rabbit:prepare(), %% Ensure we have logs for this @@ -118,18 +117,18 @@ maybe_upgrade_mnesia() -> ok; Upgrades -> rabbit:prepare(), %% Ensure we have logs for this - case upgrade_mode(AllNodes, KnownDiscNodes) of + case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); - secondary -> secondary_upgrade(KnownDiscNodes) + secondary -> secondary_upgrade(AllNodes) end end, ok = rabbit_mnesia:delete_previous_run_disc_nodes(). 
-upgrade_mode(AllNodes, KnownDiscNodes) -> +upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(KnownDiscNodes), AfterUs} of + case {am_i_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -170,13 +169,11 @@ upgrade_mode(AllNodes, KnownDiscNodes) -> end end. -am_i_disc_node(KnownDiscNodes) -> - %% The cluster config does not list all disc nodes, but it will list us - %% if we're one. - case KnownDiscNodes of - [] -> true; - DiscNodes -> lists:member(node(), DiscNodes) - end. +am_i_disc_node() -> + %% This is pretty ugly but we can't start Mnesia and ask it (will hang), + %% we can't look at the config file (may not include us even if we're a + %% disc node). + filelib:is_regular(rabbit_mnesia:dir() ++ "/rabbit_durable_exchange.DCD"). die(Msg, Args) -> %% We don't throw or exit here since that gets thrown @@ -207,10 +204,18 @@ primary_upgrade(Upgrades, Nodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -secondary_upgrade(KnownDiscNodes) -> +secondary_upgrade(AllNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(KnownDiscNodes), + %% Note that we cluster with all nodes, rather than all disc nodes + %% (as we can't know all disc nodes at this point). This is safe as + %% we're not writing the cluster config, just setting up Mnesia. + ClusterNodes = case am_i_disc_node() of + true -> AllNodes; + false -> AllNodes -- [node()] + end, + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + rabbit_mnesia:init_db(ClusterNodes, true), write_version(mnesia), ok. -- cgit v1.2.1 From 587dc8810fc2cc68572dafc53024c9f267b0ebe7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 14:13:45 +0000 Subject: remove redundant function --- src/rabbit_mnesia.erl | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a9b4e177..74fde4a2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -371,7 +371,7 @@ init_db(ClusterNodes, Force) -> %% True single disc node, attempt upgrade ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade() of - ok -> ensure_schema_ok(); + ok -> ensure_schema_integrity(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> @@ -379,7 +379,7 @@ init_db(ClusterNodes, Force) -> %% verify schema ok = wait_for_tables(), ensure_version_ok(rabbit_upgrade:read_version()), - ensure_schema_ok(); + ensure_schema_integrity(); {[], false, _} -> %% Nothing there at all, start from scratch ok = create_schema(); @@ -396,7 +396,7 @@ init_db(ClusterNodes, Force) -> true -> disc; false -> ram end), - ensure_schema_ok() + ensure_schema_integrity() end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -429,12 +429,6 @@ ensure_version_ok({ok, DiscVersion}) -> ensure_version_ok({error, _}) -> ok = rabbit_upgrade:write_version(). -ensure_schema_ok() -> - case check_schema_integrity() of - ok -> ok; - {error, Reason} -> throw({error, {schema_invalid, Reason}}) - end. 
- create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), -- cgit v1.2.1 From 4ed65ad1f0bec39722e5d91320c1f5208f804eb9 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 14:35:34 +0000 Subject: remove redundant calls to wait_for_tables ensure_schema_integrity calls it indirectly anyway --- src/rabbit_mnesia.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 74fde4a2..a30f7996 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -377,7 +377,6 @@ init_db(ClusterNodes, Force) -> {[], true, _} -> %% "Master" (i.e. without config) disc node in cluster, %% verify schema - ok = wait_for_tables(), ensure_version_ok(rabbit_upgrade:read_version()), ensure_schema_integrity(); {[], false, _} -> @@ -437,7 +436,6 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = wait_for_tables(), ok = rabbit_upgrade:write_version(). move_db() -> -- cgit v1.2.1 From 5b70262f2421af39e76b29b57fef44375ea44c9b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 14:41:24 +0000 Subject: Revert d3fd719c5287 (Remove should_offer/1). --- include/rabbit_auth_mechanism_spec.hrl | 1 + src/rabbit_auth_mechanism.erl | 4 ++++ src/rabbit_auth_mechanism_amqplain.erl | 5 ++++- src/rabbit_auth_mechanism_cr_demo.erl | 5 ++++- src/rabbit_auth_mechanism_plain.erl | 5 ++++- src/rabbit_reader.erl | 18 +++++++++--------- 6 files changed, 26 insertions(+), 12 deletions(-) diff --git a/include/rabbit_auth_mechanism_spec.hrl b/include/rabbit_auth_mechanism_spec.hrl index 49614d5f..614a3eed 100644 --- a/include/rabbit_auth_mechanism_spec.hrl +++ b/include/rabbit_auth_mechanism_spec.hrl @@ -17,6 +17,7 @@ -ifdef(use_specs). -spec(description/0 :: () -> [{atom(), any()}]). +-spec(should_offer/1 :: (rabbit_net:socket()) -> boolean()). -spec(init/1 :: (rabbit_net:socket()) -> any()). -spec(handle_response/2 :: (binary(), any()) -> {'ok', rabbit_types:user()} | diff --git a/src/rabbit_auth_mechanism.erl b/src/rabbit_auth_mechanism.erl index 1d14f9f0..897199ee 100644 --- a/src/rabbit_auth_mechanism.erl +++ b/src/rabbit_auth_mechanism.erl @@ -23,6 +23,10 @@ behaviour_info(callbacks) -> %% A description. {description, 0}, + %% If this mechanism is enabled, should it be offered for a given socket? + %% (primarily so EXTERNAL can be SSL-only) + {should_offer, 1}, + %% Called before authentication starts. Should create a state %% object to be passed through all the stages of authentication. {init, 1}, diff --git a/src/rabbit_auth_mechanism_amqplain.erl b/src/rabbit_auth_mechanism_amqplain.erl index 5e422eee..2168495d 100644 --- a/src/rabbit_auth_mechanism_amqplain.erl +++ b/src/rabbit_auth_mechanism_amqplain.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). -include("rabbit_auth_mechanism_spec.hrl"). @@ -38,6 +38,9 @@ description() -> [{name, <<"AMQPLAIN">>}, {description, <<"QPid AMQPLAIN mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> []. diff --git a/src/rabbit_auth_mechanism_cr_demo.erl b/src/rabbit_auth_mechanism_cr_demo.erl index 7fd20f8b..77aa34ea 100644 --- a/src/rabbit_auth_mechanism_cr_demo.erl +++ b/src/rabbit_auth_mechanism_cr_demo.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). 
-include("rabbit_auth_mechanism_spec.hrl"). @@ -43,6 +43,9 @@ description() -> {description, <<"RabbitMQ Demo challenge-response authentication " "mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> #state{}. diff --git a/src/rabbit_auth_mechanism_plain.erl b/src/rabbit_auth_mechanism_plain.erl index 1ca07018..e2f9bff9 100644 --- a/src/rabbit_auth_mechanism_plain.erl +++ b/src/rabbit_auth_mechanism_plain.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). -include("rabbit_auth_mechanism_spec.hrl"). @@ -41,6 +41,9 @@ description() -> [{name, <<"PLAIN">>}, {description, <<"SASL PLAIN authentication mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> []. diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 3908b646..29321c60 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -564,7 +564,7 @@ start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, version_major = ProtocolMajor, version_minor = ProtocolMinor, server_properties = server_properties(Protocol), - mechanisms = auth_mechanisms_binary(), + mechanisms = auth_mechanisms_binary(Sock), locales = <<"en_US">> }, ok = send_on_channel0(Sock, Start, Protocol), switch_callback(State#v1{connection = Connection#connection{ @@ -616,7 +616,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, State0 = #v1{connection_state = starting, connection = Connection, sock = Sock}) -> - AuthMechanism = auth_mechanism_to_module(Mechanism), + AuthMechanism = auth_mechanism_to_module(Mechanism, Sock), Capabilities = case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of {table, Capabilities1} -> Capabilities1; @@ -709,14 +709,14 @@ handle_method0(_Method, #v1{connection_state = S}) -> send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -auth_mechanism_to_module(TypeBin) -> +auth_mechanism_to_module(TypeBin, Sock) -> case rabbit_registry:binary_to_type(TypeBin) of {error, not_found} -> rabbit_misc:protocol_error( command_invalid, "unknown authentication mechanism '~s'", [TypeBin]); T -> - case {lists:member(T, auth_mechanisms()), + case {lists:member(T, auth_mechanisms(Sock)), rabbit_registry:lookup_module(auth_mechanism, T)} of {true, {ok, Module}} -> Module; @@ -727,15 +727,15 @@ auth_mechanism_to_module(TypeBin) -> end end. -auth_mechanisms() -> +auth_mechanisms(Sock) -> {ok, Configured} = application:get_env(auth_mechanisms), - [Name || {Name, _Module} <- rabbit_registry:lookup_all(auth_mechanism), - lists:member(Name, Configured)]. + [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism), + Module:should_offer(Sock), lists:member(Name, Configured)]. -auth_mechanisms_binary() -> +auth_mechanisms_binary(Sock) -> list_to_binary( string:join( - [atom_to_list(A) || A <- auth_mechanisms()], " ")). + [atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). 
auth_phase(Response, State = #v1{auth_mechanism = AuthMechanism, -- cgit v1.2.1 From 102eb1221e34274c2fa54595d3c2fd258645f410 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 22 Feb 2011 14:43:08 +0000 Subject: Sender-specified destinations updates build on R12B3 reduce mnesia lookups --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_basic.erl | 31 +++++++++++++------------- src/rabbit_exchange_type_direct.erl | 3 +-- src/rabbit_exchange_type_fanout.erl | 2 +- src/rabbit_msg_file.erl | 24 ++++++++++---------- src/rabbit_msg_store.erl | 41 +++++++++++++++++++++++++---------- src/rabbit_router.erl | 17 ++++++++++++--- src/rabbit_variable_queue.erl | 32 +++------------------------ 8 files changed, 78 insertions(+), 74 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 17cdedc2..4889abff 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec(store_names/0 :: () -> [atom()]). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 503f01bc..376a303e 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -31,7 +31,6 @@ -type(publish_result() :: ({ok, rabbit_router:routing_result(), [pid()]} | rabbit_types:error('not_found'))). --type(msg_or_error() :: {'ok', rabbit_types:message()} | {'error', any()}). -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). @@ -41,10 +40,11 @@ rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> msg_or_error()). + properties_input(), binary()) -> rabbit_types:message()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> msg_or_error()). + rabbit_types:decoded_content()) -> {'ok', rabbit_types:message()} | + {'error', any()}). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -98,17 +98,19 @@ from_content(Content) -> {Props, list_to_binary(lists:reverse(FragmentsRev))}. %% This breaks the spec rule forbidding message modification +strip_header(#content{properties = #'P_basic'{headers = undefined}} + = DecodedContent, _Key) -> + DecodedContent; strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} - = DecodedContent, Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Found -> Headers0 = lists:delete(Found, Headers), - rabbit_binary_generator:clear_encoded_content( - DecodedContent#content{ - properties = Props#'P_basic'{headers = Headers0}}) - end; -strip_header(DecodedContent, _Key) -> - DecodedContent. + = DecodedContent, Key) -> + case lists:keysearch(Key, 1, Headers) of + false -> DecodedContent; + {value, Found} -> Headers0 = lists:delete(Found, Headers), + rabbit_binary_generator:clear_encoded_content( + DecodedContent#content{ + properties = Props#'P_basic'{ + headers = Headers0}}) + end. message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> @@ -170,7 +172,7 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - _ -> false + Other -> throw({error, {delivery_mode_unknown, Other}}) end. 
% Extract CC routes from headers @@ -185,4 +187,3 @@ header_routes(HeadersTable) -> Type, binary_to_list(HeaderKey)}}) end || HeaderKey <- ?ROUTING_HEADERS]). - diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 82776c4a..349c2f6e 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -37,8 +37,7 @@ description() -> route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> - lists:append([rabbit_router:match_routing_key(Name, RKey) || - RKey <- Routes]). + rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 382fb627..bc5293c8 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -36,7 +36,7 @@ description() -> {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, _Delivery) -> - rabbit_router:match_routing_key(Name, '_'). + rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 81f2f07e..55e6ac47 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -80,28 +80,28 @@ read(FileHdl, TotalSize) -> end. scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). + scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc). -scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> +scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) -> {ok, Acc, ScanOffset}; -scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset, Fun) -> +scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) -> Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]), case file_handle_cache:read(FileHdl, Read) of {ok, Data1} -> {Data2, Acc1, ScanOffset1} = - scanner(<>, Acc, ScanOffset, Fun), + scanner(<>, ScanOffset, Fun, Acc), ReadOffset1 = ReadOffset + size(Data1), - scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1, Fun); + scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1); _KO -> {ok, Acc, ScanOffset} end. -scanner(<<>>, Acc, Offset, _Fun) -> +scanner(<<>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; -scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset, _Fun) -> +scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; %% Nothing to do other than stop. scanner(<>, Acc, Offset, Fun) -> + WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) -> TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, case WriteMarker of ?WRITE_OK_MARKER -> @@ -113,10 +113,10 @@ scanner(<> = <>, <> = <>, - scanner(Rest, Fun({Guid, TotalSize, Offset, Msg}, Acc), - Offset + TotalSize, Fun); + scanner(Rest, Offset + TotalSize, Fun, + Fun({Guid, TotalSize, Offset, Msg}, Acc)); _ -> - scanner(Rest, Acc, Offset + TotalSize, Fun) + scanner(Rest, Offset + TotalSize, Fun, Acc) end; -scanner(Data, Acc, Offset, _Fun) -> +scanner(Data, Offset, _Fun, Acc) -> {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a2f6d7e2..d798c4f7 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,7 +26,7 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal --export([transform_dir/3, force_recovery/2]). %% upgrade +-export([multiple_routing_keys/0]). 
%% upgrade -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). @@ -106,6 +106,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -export_type([gc_state/0, file_num/0]). @@ -164,9 +166,7 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). --spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). --spec(transform_dir/3 :: (file:filename(), server(), - fun ((binary()) -> ({'ok', msg()} | {error, any()}))) -> 'ok'). +-spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -1968,6 +1968,25 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. +%%---------------------------------------------------------------------------- +%% upgrade +%%---------------------------------------------------------------------------- + +multiple_routing_keys() -> + [transform_store( + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end, Store) || Store <- rabbit_variable_queue:store_names()], + ok. + +%% Assumes message store is not running +transform_store(TransformFun, Store) -> + force_recovery(rabbit_mnesia:dir(), Store), + transform_dir(rabbit_mnesia:dir(), Store, TransformFun). + force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), @@ -1975,10 +1994,10 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(D, Fun, Files) -> +foreach_file(D, Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files]. -for_each_file(D1, D2, Fun, Files) -> +foreach_file(D1, D2, Fun, Files) -> [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> @@ -1988,11 +2007,11 @@ transform_dir(BaseDir, Store, TransformFun) -> case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(Dir, TmpDir, TransformFile, OldFileList), - for_each_file(Dir, fun file:delete/1, OldFileList), + foreach_file(Dir, TmpDir, TransformFile, OldFileList), + foreach_file(Dir, fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), - for_each_file(TmpDir, fun file:delete/1, NewFileList), + foreach_file(TmpDir, Dir, fun file:copy/2, NewFileList), + foreach_file(TmpDir, fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. @@ -2007,7 +2026,7 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, Size, fun({Guid, _Size, _Offset, BinMsg}, ok) -> - case TransformFun(BinMsg) of + case TransformFun(binary_to_term(BinMsg)) of {ok, MsgNew} -> {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), ok; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 692d2473..53e707f4 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -37,7 +37,8 @@ fun ((rabbit_types:binding()) -> boolean())) -> match_result()). 
-spec(match_routing_key/2 :: (rabbit_types:binding_source(), - routing_key() | '_') -> match_result()). + [routing_key()] | ['_']) -> + match_result()). -endif. @@ -82,12 +83,22 @@ match_bindings(SrcName, Match) -> Match(Binding)]), mnesia:async_dirty(fun qlc:e/1, [Query]). -match_routing_key(SrcName, RoutingKey) -> +match_routing_key(SrcName, [RoutingKey]) -> MatchHead = #route{binding = #binding{source = SrcName, destination = '$1', key = RoutingKey, _ = '_'}}, - mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]). + mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]); +match_routing_key(SrcName, [_|_] = RoutingKeys) -> + Condition = list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || + RKey <- RoutingKeys]]), + MatchHead = #route{binding = #binding{source = SrcName, + destination = '$1', + key = '$2', + _ = '_'}}, + mnesia:dirty_select(rabbit_route, [{MatchHead, [Condition], ['$1']}]). + + %%-------------------------------------------------------------------- diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index b0781f8f..4eb9c3b8 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, multiple_routing_keys/0]). + status/1, store_names/0]). -export([start/1, stop/0]). @@ -294,8 +294,6 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). - -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). @@ -1804,29 +1802,5 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. -%%---------------------------------------------------------------------------- -%% Upgrading -%%---------------------------------------------------------------------------- - -multiple_routing_keys() -> - transform_storage( - fun (BinMsg) -> - case binary_to_term(BinMsg) of - {basic_message, ExchangeName, Routing_Key, Content, Guid, - Persistent} -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - _ -> - {error, corrupt_message} - end - end), - ok. - -%% Assumes message store is not running -transform_storage(TransformFun) -> - transform_store(?PERSISTENT_MSG_STORE, TransformFun), - transform_store(?TRANSIENT_MSG_STORE, TransformFun). - -transform_store(Store, TransformFun) -> - rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), - rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). +store_names() -> + [?PERSISTENT_MSG_STORE, ?TRANSIENT_MSG_STORE]. 
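The new multi-key clause of match_routing_key/2 above builds its match specification guard as a single 'orelse' of one '=:=' test per routing key. The same guard construction can be exercised against a plain ets table; the sketch below is illustrative only (the table layout and names are made up, and the real code runs mnesia:dirty_select over rabbit_route):

%% Sketch: select destinations whose key matches any of several routing
%% keys, using the same 'orelse' guard construction as match_routing_key/2.
-module(route_match_sketch).
-export([demo/0]).

demo() ->
    T = ets:new(routes, [bag]),
    true = ets:insert(T, [{route, <<"k1">>, q1},
                          {route, <<"k2">>, q2},
                          {route, <<"k3">>, q3}]),
    Keys = [<<"k1">>, <<"k3">>],
    Cond = list_to_tuple(['orelse' | [{'=:=', '$2', K} || K <- Keys]]),
    %% '$2' is the key position, '$1' the destination we want back
    ets:select(T, [{{route, '$2', '$1'}, [Cond], ['$1']}]).
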
-- cgit v1.2.1 From cbcafda448298d83067c1c66536df1f49f52b7de Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 15:55:37 +0000 Subject: better error reporting for failed table integrity checks --- src/rabbit_mnesia.erl | 55 +++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a30f7996..42f7e3b2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -264,45 +264,48 @@ ensure_schema_integrity() -> check_schema_integrity() -> Tables = mnesia:system_info(tables), - case [Error || {Tab, TabDef} <- table_definitions(), - case lists:member(Tab, Tables) of - false -> - Error = {table_missing, Tab}, - true; - true -> - {_, ExpAttrs} = proplists:lookup(attributes, TabDef), - Attrs = mnesia:table_info(Tab, attributes), - Error = {table_attributes_mismatch, Tab, - ExpAttrs, Attrs}, - Attrs /= ExpAttrs - end] of - [] -> check_table_integrity(); - Errors -> {error, Errors} + case check_tables(fun (Tab, TabDef) -> + case lists:member(Tab, Tables) of + false -> {error, {table_missing, Tab}}; + true -> check_table_attributes(Tab, TabDef) + end + end) of + ok -> ok = wait_for_tables(), + check_tables(fun check_table_integrity/2); + Other -> Other end. -check_table_integrity() -> - ok = wait_for_tables(), - case lists:all(fun ({Tab, TabDef}) -> - {_, Match} = proplists:lookup(match, TabDef), - read_test_table(Tab, Match) - end, table_definitions()) of - true -> ok; - false -> {error, invalid_table_content} +check_table_attributes(Tab, TabDef) -> + {_, ExpAttrs} = proplists:lookup(attributes, TabDef), + case mnesia:table_info(Tab, attributes) of + ExpAttrs -> ok; + Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}} end. -read_test_table(Tab, Match) -> +check_table_integrity(Tab, TabDef) -> + {_, Match} = proplists:lookup(match, TabDef), case mnesia:dirty_first(Tab) of '$end_of_table' -> - true; + ok; Key -> ObjList = mnesia:dirty_read(Tab, Key), MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]), case ets:match_spec_run(ObjList, MatchComp) of - ObjList -> true; - _ -> false + ObjList -> ok; + _ -> {error, {table_content_invalid, Tab, Match, ObjList}} end end. +check_tables(Fun) -> + case [Error || {Tab, TabDef} <- table_definitions(), + case Fun(Tab, TabDef) of + ok -> Error = none, false; + {error, Error} -> true + end] of + [] -> ok; + Errors -> {error, Errors} + end. + %% The cluster node config file contains some or all of the disk nodes %% that are members of the cluster this node is / should be a part of. %% -- cgit v1.2.1 From 102c4420102346c0a66ff992eacb23630bd2d3f5 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 15:57:47 +0000 Subject: better name --- src/rabbit_mnesia.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 42f7e3b2..5e990d61 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -271,7 +271,7 @@ check_schema_integrity() -> end end) of ok -> ok = wait_for_tables(), - check_tables(fun check_table_integrity/2); + check_tables(fun check_table_content/2); Other -> Other end. @@ -282,7 +282,7 @@ check_table_attributes(Tab, TabDef) -> Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}} end. 
-check_table_integrity(Tab, TabDef) -> +check_table_content(Tab, TabDef) -> {_, Match} = proplists:lookup(match, TabDef), case mnesia:dirty_first(Tab) of '$end_of_table' -> -- cgit v1.2.1 From 9d3eb1f0bd42cc23d3ad2474721d0a0a4b4fcf8e Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 22 Feb 2011 16:57:39 +0000 Subject: Revert re-arrangement of upgrade steps --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_basic.erl | 4 ++-- src/rabbit_msg_store.erl | 33 ++++++--------------------------- src/rabbit_variable_queue.erl | 29 ++++++++++++++++++++++++++--- 4 files changed, 35 insertions(+), 33 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 4889abff..17cdedc2 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(store_names/0 :: () -> [atom()]). +-spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 376a303e..f29cc805 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -43,8 +43,8 @@ properties_input(), binary()) -> rabbit_types:message()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> {'ok', rabbit_types:message()} | - {'error', any()}). + rabbit_types:decoded_content()) -> + rabbit_types:ok_or_error2(rabbit_types:message() | any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index d798c4f7..ef0e2e0d 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,7 +26,7 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal --export([multiple_routing_keys/0]). %% upgrade +-export([transform_dir/3, force_recovery/2]). %% upgrade -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). @@ -34,9 +34,8 @@ %%---------------------------------------------------------------------------- -include("rabbit_msg_store.hrl"). --include_lib("kernel/include/file.hrl"). --define(SYNC_INTERVAL, 25). %% milliseconds +-define(SYNC_INTERVAL, 5). %% milliseconds -define(CLEAN_FILENAME, "clean.dot"). -define(FILE_SUMMARY_FILENAME, "file_summary.ets"). -define(TRANSFORM_TMP, "transform_tmp"). @@ -106,8 +105,6 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). - -ifdef(use_specs). -export_type([gc_state/0, file_num/0]). @@ -166,7 +163,9 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). +-spec(transform_dir/3 :: (file:filename(), server(), + fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'). -endif. @@ -1968,25 +1967,6 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. 
-%%---------------------------------------------------------------------------- -%% upgrade -%%---------------------------------------------------------------------------- - -multiple_routing_keys() -> - [transform_store( - fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - (_) -> {error, corrupt_message} - end, Store) || Store <- rabbit_variable_queue:store_names()], - ok. - -%% Assumes message store is not running -transform_store(TransformFun, Store) -> - force_recovery(rabbit_mnesia:dir(), Store), - transform_dir(rabbit_mnesia:dir(), Store, TransformFun). - force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), @@ -2017,14 +1997,13 @@ transform_dir(BaseDir, Store, TransformFun) -> transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_misc:ensure_parent_dirs_exist(FileNew), - {ok, #file_info{size=Size}} = file:read_file_info(FileOld), {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( - RefOld, Size, + RefOld, filelib:file_size(FileOld), fun({Guid, _Size, _Offset, BinMsg}, ok) -> case TransformFun(binary_to_term(BinMsg)) of {ok, MsgNew} -> diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 4eb9c3b8..3ef76d15 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, store_names/0]). + status/1, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -294,6 +294,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). @@ -1802,5 +1804,26 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. -store_names() -> - [?PERSISTENT_MSG_STORE, ?TRANSIENT_MSG_STORE]. +%%---------------------------------------------------------------------------- +%% Upgrading +%%---------------------------------------------------------------------------- + +multiple_routing_keys() -> + transform_storage( + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end), + ok. + + +%% Assumes message store is not running +transform_storage(TransformFun) -> + transform_store(?PERSISTENT_MSG_STORE, TransformFun), + transform_store(?TRANSIENT_MSG_STORE, TransformFun). + +transform_store(Store, TransformFun) -> + rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), + rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). 
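The multiple_routing_keys/0 upgrade step above rewrites each message stored on disk so that the single routing key becomes a one-element list, matching the new #basic_message{routing_keys = [...]} shape used by the exchange types. Shown in isolation (the module name is illustrative, not part of the patch), the per-message rewrite and a tiny self-check look like this:

%% Sketch of the per-message transform applied by the upgrade step.
-module(mrk_sketch).
-export([transform/1, demo/0]).

transform({basic_message, XName, RoutingKey, Content, Guid, Persistent}) ->
    %% wrap the single key in a list; all other fields pass through
    {ok, {basic_message, XName, [RoutingKey], Content, Guid, Persistent}};
transform(_Other) ->
    {error, corrupt_message}.

demo() ->
    Old = {basic_message, x, <<"k">>, content, guid, true},
    {ok, {basic_message, x, [<<"k">>], content, guid, true}} = transform(Old),
    ok.
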
-- cgit v1.2.1 From fd53e724c289b17eca48aa2252376231be51eb41 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 17:22:37 +0000 Subject: Added functional tests --- src/gm_soak_test.erl | 130 ++++++++++++++++++++++++++++++++++++++++ src/gm_test.erl | 126 --------------------------------------- src/gm_tests.erl | 165 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 295 insertions(+), 126 deletions(-) create mode 100644 src/gm_soak_test.erl delete mode 100644 src/gm_test.erl create mode 100644 src/gm_tests.erl diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl new file mode 100644 index 00000000..1f8832a6 --- /dev/null +++ b/src/gm_soak_test.erl @@ -0,0 +1,130 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_soak_test). + +-export([test/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +%% --------------------------------------------------------------------------- +%% Soak test +%% --------------------------------------------------------------------------- + +get_state() -> + get(state). + +with_state(Fun) -> + put(state, Fun(get_state())). + +inc() -> + case 1 + get(count) of + 100000 -> Now = os:timestamp(), + Start = put(ts, Now), + Diff = timer:now_diff(Now, Start), + Rate = 100000 / (Diff / 1000000), + io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), + put(count, 0); + N -> put(count, N) + end. + +joined([], Members) -> + io:format("Joined ~p (~p members)~n", [self(), length(Members)]), + put(state, dict:from_list([{Member, empty} || Member <- Members])), + put(count, 0), + put(ts, os:timestamp()), + ok. + +members_changed([], Births, Deaths) -> + with_state( + fun (State) -> + State1 = + lists:foldl( + fun (Born, StateN) -> + false = dict:is_key(Born, StateN), + dict:store(Born, empty, StateN) + end, State, Births), + lists:foldl( + fun (Died, StateN) -> + true = dict:is_key(Died, StateN), + dict:store(Died, died, StateN) + end, State1, Deaths) + end), + ok. + +handle_msg([], From, {test_msg, Num}) -> + inc(), + with_state( + fun (State) -> + ok = case dict:find(From, State) of + {ok, died} -> + exit({{from, From}, + {received_posthumous_delivery, Num}}); + {ok, empty} -> ok; + {ok, Num} -> ok; + {ok, Num1} when Num < Num1 -> + exit({{from, From}, + {duplicate_delivery_of, Num1}, + {expecting, Num}}); + {ok, Num1} -> + exit({{from, From}, + {missing_delivery_of, Num}, + {received_early, Num1}}); + error -> + exit({{from, From}, + {received_premature_delivery, Num}}) + end, + dict:store(From, Num + 1, State) + end), + ok. + +terminate([], Reason) -> + io:format("Left ~p (~p)~n", [self(), Reason]), + ok. 
+ +spawn_member() -> + spawn_link( + fun () -> + random:seed(now()), + %% start up delay of no more than 10 seconds + timer:sleep(random:uniform(10000)), + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), + Start = random:uniform(10000), + send_loop(Pid, Start, Start + random:uniform(10000)), + gm:leave(Pid), + spawn_more() + end). + +spawn_more() -> + [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. + +send_loop(_Pid, Target, Target) -> + ok; +send_loop(Pid, Count, Target) when Target > Count -> + case random:uniform(3) of + 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); + _ -> gm:broadcast(Pid, {test_msg, Count}) + end, + timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms + send_loop(Pid, Count + 1, Target). + +test() -> + ok = gm:create_tables(), + spawn_member(), + spawn_member(). diff --git a/src/gm_test.erl b/src/gm_test.erl deleted file mode 100644 index e0a92a0c..00000000 --- a/src/gm_test.erl +++ /dev/null @@ -1,126 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. -%% - --module(gm_test). - --export([test/0]). --export([joined/2, members_changed/3, handle_msg/3, terminate/2]). - --behaviour(gm). - --include("gm_specs.hrl"). - -get_state() -> - get(state). - -with_state(Fun) -> - put(state, Fun(get_state())). - -inc() -> - case 1 + get(count) of - 100000 -> Now = os:timestamp(), - Start = put(ts, Now), - Diff = timer:now_diff(Now, Start), - Rate = 100000 / (Diff / 1000000), - io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), - put(count, 0); - N -> put(count, N) - end. - -joined([], Members) -> - io:format("Joined ~p (~p members)~n", [self(), length(Members)]), - put(state, dict:from_list([{Member, empty} || Member <- Members])), - put(count, 0), - put(ts, os:timestamp()), - ok. - -members_changed([], Births, Deaths) -> - with_state( - fun (State) -> - State1 = - lists:foldl( - fun (Born, StateN) -> - false = dict:is_key(Born, StateN), - dict:store(Born, empty, StateN) - end, State, Births), - lists:foldl( - fun (Died, StateN) -> - true = dict:is_key(Died, StateN), - dict:store(Died, died, StateN) - end, State1, Deaths) - end), - ok. - -handle_msg([], From, {test_msg, Num}) -> - inc(), - with_state( - fun (State) -> - ok = case dict:find(From, State) of - {ok, died} -> - exit({{from, From}, - {received_posthumous_delivery, Num}}); - {ok, empty} -> ok; - {ok, Num} -> ok; - {ok, Num1} when Num < Num1 -> - exit({{from, From}, - {duplicate_delivery_of, Num1}, - {expecting, Num}}); - {ok, Num1} -> - exit({{from, From}, - {missing_delivery_of, Num}, - {received_early, Num1}}); - error -> - exit({{from, From}, - {received_premature_delivery, Num}}) - end, - dict:store(From, Num + 1, State) - end), - ok. - -terminate([], Reason) -> - io:format("Left ~p (~p)~n", [self(), Reason]), - ok. 
- -spawn_member() -> - spawn_link( - fun () -> - random:seed(now()), - %% start up delay of no more than 10 seconds - timer:sleep(random:uniform(10000)), - {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), - Start = random:uniform(10000), - send_loop(Pid, Start, Start + random:uniform(10000)), - gm:leave(Pid), - spawn_more() - end). - -spawn_more() -> - [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. - -send_loop(_Pid, Target, Target) -> - ok; -send_loop(Pid, Count, Target) when Target > Count -> - case random:uniform(3) of - 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); - _ -> gm:broadcast(Pid, {test_msg, Count}) - end, - timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms - send_loop(Pid, Count + 1, Target). - -test() -> - ok = gm:create_tables(), - spawn_member(), - spawn_member(). diff --git a/src/gm_tests.erl b/src/gm_tests.erl new file mode 100644 index 00000000..38b3db2f --- /dev/null +++ b/src/gm_tests.erl @@ -0,0 +1,165 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_tests). + +-export([test_join_leave/0, + test_broadcast/0, + test_confirmed_broadcast/0, + test_member_death/0, + all_tests/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +joined(Pid, Members) -> + Pid ! {joined, self(), Members}, + ok. + +members_changed(Pid, Births, Deaths) -> + Pid ! {members_changed, self(), Births, Deaths}, + ok. + +handle_msg(Pid, From, Msg) -> + Pid ! {msg, self(), From, Msg}, + ok. + +terminate(Pid, Reason) -> + Pid ! {termination, self(), Reason}, + ok. + +%% --------------------------------------------------------------------------- +%% Functional tests +%% --------------------------------------------------------------------------- + +all_tests() -> + passed = test_join_leave(), + passed = test_broadcast(), + passed = test_confirmed_broadcast(), + passed = test_member_death(), + passed. + +test_join_leave() -> + with_two_members(fun (_Pid, _Pid2) -> passed end). + +test_broadcast() -> + test_broadcast(fun gm:broadcast/2). + +test_confirmed_broadcast() -> + test_broadcast(fun gm:confirmed_broadcast/2). + +test_member_death() -> + with_two_members( + fun (Pid, Pid2) -> + {ok, Pid3} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid3, [Pid, Pid2, Pid3], + timeout_joining_gm_group_3), + passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1), + passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2), + + unlink(Pid3), + exit(Pid3, kill), + + passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))( + Pid, Pid2), + + passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1), + passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2), + + passed + end). + +test_broadcast(Fun) -> + with_two_members(test_broadcast_fun(Fun)). 
+ +test_broadcast_fun(Fun) -> + fun (Pid, Pid2) -> + ok = Fun(Pid, magic_message), + passed = receive_or_throw({msg, Pid, Pid, magic_message}, + timeout_waiting_for_msg), + passed = receive_or_throw({msg, Pid2, Pid, magic_message}, + timeout_waiting_for_msg) + end. + +with_two_members(Fun) -> + ok = gm:create_tables(), + + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1), + + {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), + + passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), + + passed = Fun(Pid, Pid2), + + ok = gm:leave(Pid), + passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1), + passed = + receive_termination(Pid, normal, timeout_waiting_for_termination_1), + + ok = gm:leave(Pid2), + passed = + receive_termination(Pid2, normal, timeout_waiting_for_termination_2), + + receive X -> throw({unexpected_message, X}) + after 0 -> passed + end. + +receive_or_throw(Pattern, Error) -> + receive Pattern -> + passed + after 1000 -> + throw(Error) + end. + +receive_birth(From, Born, Error) -> + receive {members_changed, From, Birth, Death} -> + [Born] = Birth, + [] = Death, + passed + after 1000 -> + throw(Error) + end. + +receive_death(From, Died, Error) -> + receive {members_changed, From, Birth, Death} -> + [] = Birth, + [Died] = Death, + passed + after 1000 -> + throw(Error) + end. + +receive_joined(From, Members, Error) -> + Members1 = lists:usort(Members), + receive {joined, From, Members2} -> + Members1 = lists:usort(Members2), + passed + after 1000 -> + throw(Error) + end. + +receive_termination(From, Reason, Error) -> + receive {termination, From, Reason1} -> + Reason = Reason1, + passed + after 1000 -> + throw(Error) + end. -- cgit v1.2.1 From c3f44f9a82132f63ad9b1566874c054909c6733f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:54:12 +0000 Subject: Magic macroification --- src/gm_tests.erl | 53 +++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 38b3db2f..bb92bc4c 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -27,6 +27,14 @@ -include("gm_specs.hrl"). +-define(RECEIVE_AFTER(Body, Bool, Error), + receive Body -> + true = Bool, + passed + after 1000 -> + throw(Error) + end). + joined(Pid, Members) -> Pid ! {joined, self(), Members}, ok. @@ -123,43 +131,24 @@ with_two_members(Fun) -> end. receive_or_throw(Pattern, Error) -> - receive Pattern -> - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER(Pattern, true, Error). receive_birth(From, Born, Error) -> - receive {members_changed, From, Birth, Death} -> - [Born] = Birth, - [] = Death, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({members_changed, From, Birth, Death}, + ([Born] == Birth) andalso ([] == Death), + Error). receive_death(From, Died, Error) -> - receive {members_changed, From, Birth, Death} -> - [] = Birth, - [Died] = Death, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({members_changed, From, Birth, Death}, + ([] == Birth) andalso ([Died] == Death), + Error). receive_joined(From, Members, Error) -> - Members1 = lists:usort(Members), - receive {joined, From, Members2} -> - Members1 = lists:usort(Members2), - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({joined, From, Members2}, + lists:usort(Members) == lists:usort(Members2), + Error). 
receive_termination(From, Reason, Error) -> - receive {termination, From, Reason1} -> - Reason = Reason1, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({termination, From, Reason1}, + Reason == Reason1, + Error). -- cgit v1.2.1 From 8a6cb10fd4817ebf92303e397f797c1a3de6ed57 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:55:24 +0000 Subject: consistency --- src/gm_tests.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index bb92bc4c..fd9a6487 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -144,8 +144,8 @@ receive_death(From, Died, Error) -> Error). receive_joined(From, Members, Error) -> - ?RECEIVE_AFTER({joined, From, Members2}, - lists:usort(Members) == lists:usort(Members2), + ?RECEIVE_AFTER({joined, From, Members1}, + lists:usort(Members) == lists:usort(Members1), Error). receive_termination(From, Reason, Error) -> -- cgit v1.2.1 From 5597c0f213da52331b090c1f2f954ccf155dd0cd Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:57:01 +0000 Subject: rename --- src/gm_tests.erl | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index fd9a6487..87244153 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -27,7 +27,7 @@ -include("gm_specs.hrl"). --define(RECEIVE_AFTER(Body, Bool, Error), +-define(RECEIVE_OR_THROW(Body, Bool, Error), receive Body -> true = Bool, passed @@ -131,24 +131,24 @@ with_two_members(Fun) -> end. receive_or_throw(Pattern, Error) -> - ?RECEIVE_AFTER(Pattern, true, Error). + ?RECEIVE_OR_THROW(Pattern, true, Error). receive_birth(From, Born, Error) -> - ?RECEIVE_AFTER({members_changed, From, Birth, Death}, - ([Born] == Birth) andalso ([] == Death), - Error). + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([Born] == Birth) andalso ([] == Death), + Error). receive_death(From, Died, Error) -> - ?RECEIVE_AFTER({members_changed, From, Birth, Death}, - ([] == Birth) andalso ([Died] == Death), - Error). + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([] == Birth) andalso ([Died] == Death), + Error). receive_joined(From, Members, Error) -> - ?RECEIVE_AFTER({joined, From, Members1}, - lists:usort(Members) == lists:usort(Members1), - Error). + ?RECEIVE_OR_THROW({joined, From, Members1}, + lists:usort(Members) == lists:usort(Members1), + Error). receive_termination(From, Reason, Error) -> - ?RECEIVE_AFTER({termination, From, Reason1}, - Reason == Reason1, - Error). + ?RECEIVE_OR_THROW({termination, From, Reason1}, + Reason == Reason1, + Error). -- cgit v1.2.1 From a74602a5813a6915f3be26719e84a637fea337f5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 12:52:55 +0000 Subject: Added test to assert receiving messages in the order they're sent. Other cosmetics --- src/gm_tests.erl | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 87244153..65e9cff0 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -20,6 +20,7 @@ test_broadcast/0, test_confirmed_broadcast/0, test_member_death/0, + test_receive_in_order/0, all_tests/0]). -export([joined/2, members_changed/3, handle_msg/3, terminate/2]). @@ -60,6 +61,7 @@ all_tests() -> passed = test_broadcast(), passed = test_confirmed_broadcast(), passed = test_member_death(), + passed = test_receive_in_order(), passed. 
test_join_leave() -> @@ -83,6 +85,8 @@ test_member_death() -> unlink(Pid3), exit(Pid3, kill), + %% Have to do some broadcasts to ensure that all members + %% find out about the death. passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))( Pid, Pid2), @@ -92,6 +96,23 @@ test_member_death() -> passed end). +test_receive_in_order() -> + with_two_members( + fun (Pid, Pid2) -> + Numbers = lists:seq(1,1000), + [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end + || N <- Numbers], + passed = receive_numbers( + Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers), + passed = receive_numbers( + Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers), + passed = receive_numbers( + Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers), + passed = receive_numbers( + Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers), + passed + end). + test_broadcast(Fun) -> with_two_members(test_broadcast_fun(Fun)). @@ -112,7 +133,6 @@ with_two_members(Fun) -> {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self()), passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), - passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), passed = Fun(Pid, Pid2), @@ -152,3 +172,11 @@ receive_termination(From, Reason, Error) -> ?RECEIVE_OR_THROW({termination, From, Reason1}, Reason == Reason1, Error). + +receive_numbers(_Pid, _Sender, _Error, []) -> + passed; +receive_numbers(Pid, Sender, Error, [N | Numbers]) -> + ?RECEIVE_OR_THROW({msg, Pid, Sender, M}, + M == N, + Error), + receive_numbers(Pid, Sender, Error, Numbers). -- cgit v1.2.1 From eccf06819029cc5c72b0d8b166dca929ba42e620 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 12:54:40 +0000 Subject: Wire in gm_tests to rabbit tests --- src/rabbit_tests.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 49b09508..644c4f96 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -34,6 +34,7 @@ test_content_prop_roundtrip(Datum, Binary) -> Binary = rabbit_binary_generator:encode_properties(Types, Values). %% assertion all_tests() -> + passed = gm_tests:all_tests(), application:set_env(rabbit, file_handles_high_watermark, 10, infinity), ok = file_handle_cache:set_limit(10), passed = test_file_handle_cache(), -- cgit v1.2.1 From 1ee22ba19d1cdfab15811b75d6a4b7a3020eb38d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 13:09:16 +0000 Subject: correction of specs --- include/gm_specs.hrl | 2 +- src/gm.erl | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 2109d15d..ee29706e 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -17,7 +17,7 @@ -ifdef(use_specs). -type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}). --type(args() :: [any()]). +-type(args() :: any()). -type(members() :: [pid()]). -spec(joined/2 :: (args(), members()) -> callback_result()). diff --git a/src/gm.erl b/src/gm.erl index 283b2431..b3fb7eca 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -53,13 +53,12 @@ %% to create the tables required. %% %% start_link/3 -%% Provide the group name, the callback module name, and a list of any -%% arguments you wish to be passed into the callback module's -%% functions. The joined/1 will be called when we have joined the -%% group, and the list of arguments will have appended to it a list of -%% the current members of the group. See the comments in -%% behaviour_info/1 below for further details of the callback -%% functions. 
+%% Provide the group name, the callback module name, and any arguments +%% you wish to be passed into the callback module's functions. The +%% joined/1 will be called when we have joined the group, and the list +%% of arguments will have appended to it a list of the current members +%% of the group. See the comments in behaviour_info/1 below for +%% further details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback @@ -421,7 +420,7 @@ -type(group_name() :: any()). -spec(create_tables/0 :: () -> 'ok'). --spec(start_link/3 :: (group_name(), atom(), [any()]) -> +-spec(start_link/3 :: (group_name(), atom(), any()) -> {'ok', pid()} | {'error', any()}). -spec(leave/1 :: (pid()) -> 'ok'). -spec(broadcast/2 :: (pid(), any()) -> 'ok'). -- cgit v1.2.1 From fff7752e4df43bdefecee6a9700b5d34df3097e5 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Feb 2011 13:40:15 +0000 Subject: Fixed incorrect binding pattern in rabbit_mnesia --- src/rabbit_mnesia.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 25767a55..93e20381 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -232,8 +232,8 @@ trie_edge_match() -> #trie_edge{exchange_name = exchange_name_match(), _='_'}. trie_binding_match() -> - #trie_edge{exchange_name = exchange_name_match(), - _='_'}. + #trie_binding{exchange_name = exchange_name_match(), + _='_'}. exchange_name_match() -> resource_match(exchange). queue_name_match() -> -- cgit v1.2.1 From d86469a2af5cc68da909a4698a0ee634f2e8aa8b Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Feb 2011 14:43:00 +0000 Subject: Removed table name intersection in wait_for_tables and cleaned up whitespace changes --- src/rabbit_mnesia.erl | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 93e20381..f2d23dad 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -388,8 +388,7 @@ init_db(ClusterNodes, Force) -> {[], true, [_]} -> %% True single disc node, attempt upgrade case rabbit_upgrade:maybe_upgrade() of - ok -> ok = wait_for_tables(), - ensure_schema_integrity(); + ok -> ensure_schema_integrity(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> @@ -544,17 +543,15 @@ create_local_table_copy(Tab, Type) -> end, ok. -wait_for_replicated_tables() -> - wait_for_tables(replicated_table_names()). +wait_for_replicated_tables() -> wait_for_tables(replicated_table_names()). -wait_for_tables() -> - wait_for_tables(table_names()). +wait_for_tables() -> wait_for_tables(table_names()). 
wait_for_tables(TableNames) -> - Nonexistent = TableNames -- mnesia:system_info(tables), - case mnesia:wait_for_tables(TableNames -- Nonexistent, 30000) of - ok -> ok; - {timeout, BadTabs} -> + case mnesia:wait_for_tables(TableNames, 30000) of + ok -> + ok; + {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); {error, Reason} -> throw({error, {failed_waiting_for_tables, Reason}}) -- cgit v1.2.1 From d2199eccd9ecbf0c50666fe793d780cdbbf23ef3 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 23 Feb 2011 15:12:38 +0000 Subject: cosmetic --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f2d23dad..d3cb492e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -551,7 +551,7 @@ wait_for_tables(TableNames) -> case mnesia:wait_for_tables(TableNames, 30000) of ok -> ok; - {timeout, BadTabs} -> + {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); {error, Reason} -> throw({error, {failed_waiting_for_tables, Reason}}) -- cgit v1.2.1 From 4d36462a0eb49acca8190c9aa6e5b54a59fc5d18 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 24 Feb 2011 14:25:06 +0000 Subject: English, not American --- src/rabbit_reader.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 29321c60..b172db56 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -158,7 +158,7 @@ server_properties(Protocol) -> {copyright, ?COPYRIGHT_MESSAGE}, {information, ?INFORMATION_MESSAGE}]]], - %% Filter duplicated properties in favor of config file provided values + %% Filter duplicated properties in favour of config file provided values lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end, NormalizedConfigServerProps). -- cgit v1.2.1 From a64a627af2739a5556f00064c9b02443bd0c4215 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 24 Feb 2011 15:14:26 +0000 Subject: Dialyzer typo --- src/rabbit_basic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index f29cc805..57aad808 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -44,7 +44,7 @@ -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), rabbit_types:decoded_content()) -> - rabbit_types:ok_or_error2(rabbit_types:message() | any())). + rabbit_types:ok_or_error2(rabbit_types:message(), any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: -- cgit v1.2.1 From 6fd77744201852a1fb961809f693d8b27acf7346 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 24 Feb 2011 18:15:34 +0000 Subject: Make memory alarms work correctly over clusters --- Makefile | 4 +- src/rabbit_alarm.erl | 122 +++++++++++++++++++++++++++++++++++--------- src/rabbit_node_monitor.erl | 11 ++-- src/vm_memory_monitor.erl | 4 +- 4 files changed, 107 insertions(+), 34 deletions(-) diff --git a/Makefile b/Makefile index 00c7809d..cdb86aad 100644 --- a/Makefile +++ b/Makefile @@ -177,11 +177,11 @@ stop-rabbit-on-node: all echo "rabbit:stop()." | $(ERL_CALL) set-memory-alarm: all - echo "alarm_handler:set_alarm({vm_memory_high_watermark, []})." | \ + echo "alarm_handler:set_alarm({{vm_memory_high_watermark, node()}, []})." | \ $(ERL_CALL) clear-memory-alarm: all - echo "alarm_handler:clear_alarm(vm_memory_high_watermark)." | \ + echo "alarm_handler:clear_alarm({vm_memory_high_watermark, node()})." 
| \ $(ERL_CALL) stop-node: diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 37e40981..365a5ed2 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -18,12 +18,14 @@ -behaviour(gen_event). --export([start/0, stop/0, register/2]). +-export([start/0, stop/0, register/2, on_node/2]). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). --record(alarms, {alertees, vm_memory_high_watermark = false}). +-export([remote_conserve_memory/2]). %% Internal use only + +-record(alarms, {alertees, high_watermarks}). %%---------------------------------------------------------------------------- @@ -33,6 +35,7 @@ -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). -spec(register/2 :: (pid(), mfa_tuple()) -> boolean()). +-spec(on_node/2 :: ('up'|'down', node()) -> 'ok'). -endif. @@ -56,32 +59,61 @@ register(Pid, HighMemMFA) -> {register, Pid, HighMemMFA}, infinity). +on_node(Action, Node) -> + gen_event:notify(alarm_handler, {node, Action, Node}). + +remote_conserve_memory(Pid, Conserve) -> + RemoteNode = node(Pid), + %% Can't use alarm_handler:{set,clear}_alarm because that doesn't + %% permit notifying a remote node. + case Conserve of + true -> gen_event:notify( + {alarm_handler, RemoteNode}, + {set_alarm, {{vm_memory_high_watermark, node()}, []}}); + false -> gen_event:notify( + {alarm_handler, RemoteNode}, + {clear_alarm, {vm_memory_high_watermark, node()}}) + end. + %%---------------------------------------------------------------------------- init([]) -> - {ok, #alarms{alertees = dict:new()}}. + {ok, #alarms{alertees = dict:new(), + high_watermarks = sets:new()}}. -handle_call({register, Pid, {M, F, A} = HighMemMFA}, - State = #alarms{alertees = Alertess}) -> - _MRef = erlang:monitor(process, Pid), - ok = case State#alarms.vm_memory_high_watermark of - true -> apply(M, F, A ++ [Pid, true]); - false -> ok - end, - NewAlertees = dict:store(Pid, HighMemMFA, Alertess), - {ok, State#alarms.vm_memory_high_watermark, - State#alarms{alertees = NewAlertees}}; +handle_call({register, Pid, HighMemMFA}, State) -> + {ok, 0 < sets:size(State#alarms.high_watermarks), + internal_register(Pid, HighMemMFA, State)}; handle_call(_Request, State) -> {ok, not_understood, State}. -handle_event({set_alarm, {vm_memory_high_watermark, []}}, State) -> - ok = alert(true, State#alarms.alertees), - {ok, State#alarms{vm_memory_high_watermark = true}}; - -handle_event({clear_alarm, vm_memory_high_watermark}, State) -> - ok = alert(false, State#alarms.alertees), - {ok, State#alarms{vm_memory_high_watermark = false}}; +handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, + State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:add_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, true), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, + State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:del_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({node, up, Node}, State) -> + %% Must do this via notify and not call to avoid possible deadlock. 
+ ok = gen_event:notify( + {alarm_handler, Node}, + {register, self(), {?MODULE, remote_conserve_memory, []}}), + {ok, State}; + +handle_event({node, down, Node}, State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:del_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({register, Pid, HighMemMFA}, State) -> + {ok, internal_register(Pid, HighMemMFA, State)}; handle_event(_Event, State) -> {ok, State}. @@ -100,10 +132,50 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. %%---------------------------------------------------------------------------- -alert(_Alert, undefined) -> - ok; -alert(Alert, Alertees) -> + +maybe_alert(Before, After, Alertees, AlarmNode, Action) + when AlarmNode =:= node() -> + %% If we have changed our alarm state, always inform the remotes. + case {sets:is_element(AlarmNode, Before), sets:is_element(AlarmNode, After), + Action} of + {false, true, true} -> alert_remote(Action, Alertees); + {true, false, false} -> alert_remote(Action, Alertees); + _ -> ok + end, + maybe_alert_local(Before, After, Alertees, Action); +maybe_alert(Before, After, Alertees, _AlarmNode, Action) -> + maybe_alert_local(Before, After, Alertees, Action). + +maybe_alert_local(Before, After, Alertees, Action) -> + %% If the overall alarm state has changed, inform the locals. + case {sets:size(Before), sets:size(After), Action} of + {0, 1, true} -> alert_local(Action, Alertees); + {1, 0, false} -> alert_local(Action, Alertees); + _ -> ok + end. + +alert_local(Alert, Alertees) -> + alert(Alert, Alertees, fun erlang:'=:='/2). + +alert_remote(Alert, Alertees) -> + alert(Alert, Alertees, fun erlang:'=/='/2). + +alert(Alert, Alertees, NodeComparator) -> + Node = node(), dict:fold(fun (Pid, {M, F, A}, Acc) -> - ok = erlang:apply(M, F, A ++ [Pid, Alert]), - Acc + case NodeComparator(Node, node(Pid)) of + true -> ok = erlang:apply(M, F, A ++ [Pid, Alert]), + Acc; + false -> Acc + end end, ok, Alertees). + +internal_register(Pid, {M, F, A} = HighMemMFA, + State = #alarms{alertees = Alertees}) -> + _MRef = erlang:monitor(process, Pid), + ok = case sets:is_element(node(), State#alarms.high_watermarks) of + true -> apply(M, F, A ++ [Pid, true]); + false -> ok + end, + NewAlertees = dict:store(Pid, HighMemMFA, Alertees), + State#alarms{alertees = NewAlertees}. diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 817abaa2..061f628d 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -69,6 +69,7 @@ handle_call(_Request, _From, State) -> handle_cast({rabbit_running_on, Node}, State) -> rabbit_log:info("node ~p up~n", [Node]), erlang:monitor(process, {rabbit, Node}), + ok = rabbit_alarm:on_node(up, Node), {noreply, State}; handle_cast(_Msg, State) -> {noreply, State}. @@ -92,10 +93,10 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- -%% TODO: This may turn out to be a performance hog when there are -%% lots of nodes. We really only need to execute this code on -%% *one* node, rather than all of them. +%% TODO: This may turn out to be a performance hog when there are lots +%% of nodes. We really only need to execute some of these statements +%% on *one* node, rather than all of them. handle_dead_rabbit(Node) -> ok = rabbit_networking:on_node_down(Node), - ok = rabbit_amqqueue:on_node_down(Node). - + ok = rabbit_amqqueue:on_node_down(Node), + ok = rabbit_alarm:on_node(down, Node). 
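As background for the alarm rework in this patch: rabbit_alarm now tracks a set of alarmed nodes, and any process can subscribe to the resulting conserve-memory notifications through rabbit_alarm:register/2; the registered {M, F, A} is applied with [Pid, Conserve] appended whenever the relevant alarm state flips. A rough sketch of a hypothetical subscriber, assuming only the register/2 call shown above (the module name and the io:format bodies are invented for illustration):

    -module(my_alarm_watcher).
    -export([start/0, conserve_memory/2]).

    start() ->
        %% The return value is true when some node in the cluster is already
        %% over its memory watermark at registration time.
        _Alarmed = rabbit_alarm:register(self(), {?MODULE, conserve_memory, []}),
        ok.

    %% rabbit_alarm appends [Pid, Conserve] to the registered argument list,
    %% so this is invoked as conserve_memory(Pid, true | false).
    conserve_memory(_Pid, true)  -> io:format("memory alarm set: throttle publishers~n");
    conserve_memory(_Pid, false) -> io:format("memory alarm cleared: resume~n").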
diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl index 44e1e4b5..dcc6aff5 100644 --- a/src/vm_memory_monitor.erl +++ b/src/vm_memory_monitor.erl @@ -175,10 +175,10 @@ internal_update(State = #state { memory_limit = MemLimit, case {Alarmed, NewAlarmed} of {false, true} -> emit_update_info(set, MemUsed, MemLimit), - alarm_handler:set_alarm({vm_memory_high_watermark, []}); + alarm_handler:set_alarm({{vm_memory_high_watermark, node()}, []}); {true, false} -> emit_update_info(clear, MemUsed, MemLimit), - alarm_handler:clear_alarm(vm_memory_high_watermark); + alarm_handler:clear_alarm({vm_memory_high_watermark, node()}); _ -> ok end, -- cgit v1.2.1 From a6d046e3cbbde4320b201fd7d78a864749fe70a1 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 24 Feb 2011 18:32:56 +0000 Subject: Create log backups in the correct folder on Windows --- scripts/rabbitmq-server.bat | 15 ++++++--------- scripts/rabbitmq-service.bat | 15 ++++++--------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 2ca9f2b3..5e2097db 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -72,17 +72,14 @@ rem Log management (rotation, filtering based of size...) is left as an exercice set BACKUP_EXTENSION=.1 -set LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log -set SASL_LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log - -set LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log!BACKUP_EXTENSION! -set SASL_LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log!BACKUP_EXTENSION! +set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log +set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log if exist "!LOGS!" ( - type "!LOGS!" >> "!LOGS_BACKUP!" + type "!LOGS!" >> "!LOGS!!BACKUP_EXTENSION!" ) if exist "!SASL_LOGS!" ( - type "!SASL_LOGS!" >> "!SASL_LOGS_BACKUP!" + type "!SASL_LOGS!" >> "!SASL_LOGS!!BACKUP_EXTENSION!" ) rem End of log management @@ -144,10 +141,10 @@ if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( +P 1048576 ^ -kernel inet_default_connect_options "[{nodelay, true}]" ^ !RABBITMQ_LISTEN_ARG! ^ --kernel error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!.log"\"} ^ +-kernel error_logger {file,\""!LOGS:\=/!"\"} ^ !RABBITMQ_SERVER_ERL_ARGS! ^ -sasl errlog_type error ^ --sasl sasl_error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!-sasl.log"\"} ^ +-sasl sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ -os_mon start_cpu_sup true ^ -os_mon start_disksup false ^ -os_mon start_memsup false ^ diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index bc452fea..aa428a8c 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -105,17 +105,14 @@ rem Log management (rotation, filtering based on size...) is left as an exercise set BACKUP_EXTENSION=.1 -set LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log -set SASL_LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log - -set LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log!BACKUP_EXTENSION! -set SASL_LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log!BACKUP_EXTENSION! +set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log +set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log if exist "!LOGS!" ( - type "!LOGS!" >> "!LOGS_BACKUP!" + type "!LOGS!" >> "!LOGS!!BACKUP_EXTENSION!" ) if exist "!SASL_LOGS!" ( - type "!SASL_LOGS!" >> "!SASL_LOGS_BACKUP!" + type "!SASL_LOGS!" >> "!SASL_LOGS!!BACKUP_EXTENSION!" 
) rem End of log management @@ -209,10 +206,10 @@ set ERLANG_SERVICE_ARGUMENTS= ^ +A30 ^ -kernel inet_default_connect_options "[{nodelay,true}]" ^ !RABBITMQ_LISTEN_ARG! ^ --kernel error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!.log"\"} ^ +-kernel error_logger {file,\""!LOGS:\=/!"\"} ^ !RABBITMQ_SERVER_ERL_ARGS! ^ -sasl errlog_type error ^ --sasl sasl_error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!-sasl.log"\"} ^ +-sasl sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ -os_mon start_cpu_sup true ^ -os_mon start_disksup false ^ -os_mon start_memsup false ^ -- cgit v1.2.1 From d7c926b9377343878f7bc263b8d44f6a1ae1cc8d Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 25 Feb 2011 12:17:17 +0000 Subject: No, we don't supply multi man page any more. --- packaging/macports/Portfile.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 67ebcf78..8c22a75e 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -104,7 +104,8 @@ post-destroot { file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmq-server file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmqctl - xinstall -m 644 -W ${mansrc}/man1 rabbitmq-multi.1.gz rabbitmq-server.1.gz rabbitmqctl.1.gz ${mandest}/man1/ + xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz \ + ${mandest}/man1/ xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ } -- cgit v1.2.1 From 6c854337b76061d06fb1dd5e9db4976fc5b9e6f4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 12:25:16 +0000 Subject: Make documentation accurate for current API... --- src/gm.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index b3fb7eca..b21217f6 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -55,14 +55,14 @@ %% start_link/3 %% Provide the group name, the callback module name, and any arguments %% you wish to be passed into the callback module's functions. The -%% joined/1 will be called when we have joined the group, and the list -%% of arguments will have appended to it a list of the current members -%% of the group. See the comments in behaviour_info/1 below for -%% further details of the callback functions. +%% joined/2 will be called when we have joined the group, with the +%% arguments passed to start_link and a list of the current members of +%% the group. See the comments in behaviour_info/1 below for further +%% details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback -%% terminate/1 function will be called. +%% terminate/2 function will be called. %% %% broadcast/2 %% Provide the Pid and a Message. The message will be sent to all @@ -455,16 +455,16 @@ behaviour_info(callbacks) -> %% quickly, it's possible that we will never see that member %% appear in either births or deaths. However we are guaranteed %% that (1) we will see a member joining either in the births - %% here, or in the members passed to joined/1 before receiving + %% here, or in the members passed to joined/2 before receiving %% any messages from it; and (2) we will not see members die that %% we have not seen born (or supplied in the members to - %% joined/1). + %% joined/2). {members_changed, 3}, %% Supplied with Args provided in start_link, the sender, and the %% message. 
This does get called for messages injected by this %% member, however, in such cases, there is no special - %% significance of this call: it does not indicate that the + %% significance of this invocation: it does not indicate that the %% message has made it to any other members, let alone all other %% members. {handle_msg, 3}, -- cgit v1.2.1 From 522e08893e39b4f843f319d504812f8d60249769 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 25 Feb 2011 12:36:45 +0000 Subject: We renamed .conf to -env.conf. --- packaging/macports/Portfile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 7583d668..809f518b 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -104,7 +104,7 @@ post-destroot { xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz \ ${mandest}/man1/ - xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ + xinstall -m 644 -W ${mansrc}/man5 rabbitmq-env.conf.5.gz ${mandest}/man5/ } pre-install { -- cgit v1.2.1 From f4c23c93527e9bd37243ee883b552b478427c7c2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 13:05:21 +0000 Subject: Additional word --- src/gm.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index b21217f6..70633a08 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -55,10 +55,10 @@ %% start_link/3 %% Provide the group name, the callback module name, and any arguments %% you wish to be passed into the callback module's functions. The -%% joined/2 will be called when we have joined the group, with the -%% arguments passed to start_link and a list of the current members of -%% the group. See the comments in behaviour_info/1 below for further -%% details of the callback functions. +%% joined/2 function will be called when we have joined the group, +%% with the arguments passed to start_link and a list of the current +%% members of the group. See the comments in behaviour_info/1 below +%% for further details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback -- cgit v1.2.1 From 1633fd03f06b5b43006ef83833d5a0c9f28c510f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 14:45:45 +0000 Subject: multiple_routing_keys/0 is not part of the backing_queue --- include/rabbit_backing_queue_spec.hrl | 1 - src/rabbit_variable_queue.erl | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 17cdedc2..accb2c0e 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,3 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 3ef76d15..13fe9fda 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -353,6 +353,8 @@ -include("rabbit_backing_queue_spec.hrl"). +-spec(multiple_routing_keys/0 :: () -> 'ok'). + -endif. 
-define(BLANK_DELTA, #delta { start_seq_id = undefined, -- cgit v1.2.1 From c62cfd0cea0a4691d3b7806d0353eaeca8d7a375 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 14:46:30 +0000 Subject: remove blank trailing line --- src/rabbit_msg_store.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index ef0e2e0d..907f567b 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2018,4 +2018,3 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), ok. - -- cgit v1.2.1 From fcb9a05d24be5a256de6539b0208371cf17aae8f Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 25 Feb 2011 16:12:21 +0000 Subject: Stricter msg store upgrade --- src/rabbit_msg_store.erl | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 907f567b..9e65e442 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1970,8 +1970,7 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), - [file:delete(filename:join(Dir, File)) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], + recover_crashed_compactions(BaseDir), ok. foreach_file(D, Fun, Files) -> @@ -1986,12 +1985,11 @@ transform_dir(BaseDir, Store, TransformFun) -> TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); - false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - foreach_file(Dir, TmpDir, TransformFile, OldFileList), - foreach_file(Dir, fun file:delete/1, OldFileList), - NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - foreach_file(TmpDir, Dir, fun file:copy/2, NewFileList), - foreach_file(TmpDir, fun file:delete/1, NewFileList), + false -> FileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + foreach_file(Dir, TmpDir, TransformFile, FileList), + foreach_file(Dir, fun file:delete/1, FileList), + foreach_file(TmpDir, Dir, fun file:copy/2, FileList), + foreach_file(TmpDir, fun file:delete/1, FileList), ok = file:del_dir(TmpDir) end. @@ -2005,15 +2003,9 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), fun({Guid, _Size, _Offset, BinMsg}, ok) -> - case TransformFun(binary_to_term(BinMsg)) of - {ok, MsgNew} -> - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), - ok; - {error, Reason} -> - error_logger:error_msg("Message transform failed: ~p~n", - [Reason]), - ok - end + {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), -- cgit v1.2.1 From bbc9fcbcb631404e46259a606649a6bb5648db57 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Feb 2011 11:02:29 +0000 Subject: ...and untabify. 
--- src/rabbit_channel.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index d8a332f3..7dc07e5a 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1288,12 +1288,12 @@ is_message_persistent(Content) -> process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_route), maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], - return_unroutable, State), + return_unroutable, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_consumers), maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], - return_not_delivered, State), + return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From 3bef7dc2825c1274c7f4869c34c2d5af6640e20f Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 1 Mar 2011 15:44:37 +0000 Subject: First cut of pushing edge cleaning out of main topic bind delete tx --- src/rabbit_exchange_type_topic.erl | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index c1741b30..a23df31f 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -66,16 +66,21 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, _X, Bs) -> - lists:foreach(fun remove_binding/1, Bs), + ToDelete = + lists:foldr(fun(B = #binding{source = X, destination = D}, Acc) -> + [{FinalNode, _} | _] = binding_path(B), + [{X, FinalNode, D} | Acc] + end, [], Bs), + [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], ok; -remove_bindings(false, _X, _Bs) -> +remove_bindings(false, _X, Bs) -> + [rabbit_misc:execute_mnesia_transaction( + fun() -> remove_path_if_empty(X, binding_path(B)) end) + || B = #binding{source = X} <- Bs], ok. -remove_binding(#binding{source = X, key = K, destination = D}) -> - Path = [{FinalNode, _} | _] = follow_down_get_path(X, split_topic_key(K)), - trie_remove_binding(X, FinalNode, D), - remove_path_if_empty(X, Path), - ok. +binding_path(#binding{source = X, key = K}) -> + follow_down_get_path(X, split_topic_key(K)). assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). 
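To make the intent of the split above concrete: the in-transaction pass now only deletes the trie bindings it collected, while pruning of now-empty trie edges is deferred to the post-commit pass, which runs one short mnesia transaction per binding. The sketch below is a hypothetical walk-through of the two passes for a single unbind, not code from the patch; the virtual host, exchange, queue and routing key are invented, it assumes rabbit.hrl is included for the #binding{} record, and it leans on the existing rabbit_misc:r/3 and rabbit_misc:execute_mnesia_transaction/1 helpers:

    TopicX = rabbit_misc:r(<<"/">>, exchange, <<"amq.topic">>),
    Dest   = rabbit_misc:r(<<"/">>, queue, <<"ticker">>),
    B      = #binding{source = TopicX, key = <<"stock.nyse.#">>, destination = Dest},
    %% in-transaction pass: remove the collected trie bindings only
    %% (the exchange-record argument is unused by these clauses, hence 'none')
    ok = rabbit_misc:execute_mnesia_transaction(
           fun () -> rabbit_exchange_type_topic:remove_bindings(true, none, [B]) end),
    %% post-commit pass: prune empty edges, each binding in its own
    %% small transaction via remove_path_if_empty/2
    ok = rabbit_exchange_type_topic:remove_bindings(false, none, [B]).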
-- cgit v1.2.1 From 1ed39dee2676f0519cf061a08780202ee72f8aac Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 1 Mar 2011 16:10:55 +0000 Subject: Correct foldr -> foldl --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 0b43147d..25cdcc31 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -69,7 +69,7 @@ add_binding(false, _Exchange, _Binding) -> remove_bindings(true, _X, Bs) -> ToDelete = - lists:foldr(fun(B = #binding{source = X, destination = D}, Acc) -> + lists:foldl(fun(B = #binding{source = X, destination = D}, Acc) -> [{FinalNode, _} | _] = binding_path(B), [{X, FinalNode, D} | Acc] end, [], Bs), -- cgit v1.2.1 From 75b306010463265a291e84d91f9e13ebbd470714 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 2 Mar 2011 13:32:59 +0000 Subject: only confirm delivered messages that need confirming --- src/rabbit_variable_queue.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d1307b85..d0c984cb 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,8 +510,13 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { guid = Guid }, - _MsgProps, State = #vqstate { len = 0 }) -> - blind_confirm(self(), gb_sets:singleton(Guid)), + MsgProps = #message_properties { + needs_confirming = NeedsConfirming }, + State = #vqstate { len = 0 }) -> + case NeedsConfirming of + true -> blind_confirm(self(), gb_sets:singleton(Guid)); + false -> ok + end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, -- cgit v1.2.1 From d1cc5c276f92b3d3a7aeea8754821fc191c24514 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 2 Mar 2011 15:44:41 +0000 Subject: Extracted ensure_ssl and ssl_transform_fun for use by STOMP --- src/rabbit_networking.erl | 62 +++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 36f61628..c0cb78f5 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -24,7 +24,8 @@ close_connection/2]). %%used by TCP-based transports, e.g. STOMP adapter --export([check_tcp_listener_address/2]). +-export([check_tcp_listener_address/2, + ensure_ssl/0, ssl_transform_fun/1]). -export([tcp_listener_started/3, tcp_listener_stopped/3, start_client/1, start_ssl_client/2]). @@ -88,19 +89,8 @@ boot_ssl() -> {ok, []} -> ok; {ok, SslListeners} -> - ok = rabbit_misc:start_applications([crypto, public_key, ssl]), - {ok, SslOptsConfig} = application:get_env(ssl_options), - % unknown_ca errors are silently ignored prior to R14B unless we - % supply this verify_fun - remove when at least R14B is required - SslOpts = - case proplists:get_value(verify, SslOptsConfig, verify_none) of - verify_none -> SslOptsConfig; - verify_peer -> [{verify_fun, fun([]) -> true; - ([_|_]) -> false - end} - | SslOptsConfig] - end, - [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners], + [start_ssl_listener(Listener, ensure_ssl()) + || Listener <- SslListeners], ok end. @@ -147,6 +137,34 @@ resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6; resolve_family(IP, auto) -> throw({error, {strange_family, IP}}); resolve_family(_, F) -> F. 
+ensure_ssl() -> + ok = rabbit_misc:start_applications([crypto, public_key, ssl]), + {ok, SslOptsConfig} = application:get_env(ssl_options), + + % unknown_ca errors are silently ignored prior to R14B unless we + % supply this verify_fun - remove when at least R14B is required + case proplists:get_value(verify, SslOptsConfig, verify_none) of + verify_none -> SslOptsConfig; + verify_peer -> [{verify_fun, fun([]) -> true; + ([_|_]) -> false + end} + | SslOptsConfig] + end. + +ssl_transform_fun(SslOpts) -> + fun (Sock) -> + case catch ssl:ssl_accept(Sock, SslOpts, ?SSL_TIMEOUT * 1000) of + {ok, SslSock} -> + rabbit_log:info("upgraded TCP connection ~p to SSL~n", + [self()]), + {ok, #ssl_socket{tcp = Sock, ssl = SslSock}}; + {error, Reason} -> + {error, {ssl_upgrade_error, Reason}}; + {'EXIT', Reason} -> + {error, {ssl_upgrade_failure, Reason}} + end + end. + check_tcp_listener_address(NamePrefix, Port) when is_integer(Port) -> check_tcp_listener_address_auto(NamePrefix, Port); @@ -246,21 +264,7 @@ start_client(Sock) -> start_client(Sock, fun (S) -> {ok, S} end). start_ssl_client(SslOpts, Sock) -> - start_client( - Sock, - fun (Sock1) -> - case catch ssl:ssl_accept(Sock1, SslOpts, ?SSL_TIMEOUT * 1000) of - {ok, SslSock} -> - rabbit_log:info("upgraded TCP connection ~p to SSL~n", - [self()]), - {ok, #ssl_socket{tcp = Sock1, ssl = SslSock}}; - {error, Reason} -> - {error, {ssl_upgrade_error, Reason}}; - {'EXIT', Reason} -> - {error, {ssl_upgrade_failure, Reason}} - - end - end). + start_client(Sock, ssl_transform_fun(SslOpts)). connections() -> [rabbit_connection_sup:reader(ConnSup) || -- cgit v1.2.1 From 6a88269b83e0e93d50e7e65435c9daeef0fc7ddb Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 2 Mar 2011 21:52:31 +0000 Subject: Always specify rabbit application when looking up ssl_options --- src/rabbit_networking.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index c0cb78f5..53be0190 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -139,7 +139,7 @@ resolve_family(_, F) -> F. ensure_ssl() -> ok = rabbit_misc:start_applications([crypto, public_key, ssl]), - {ok, SslOptsConfig} = application:get_env(ssl_options), + {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options), % unknown_ca errors are silently ignored prior to R14B unless we % supply this verify_fun - remove when at least R14B is required -- cgit v1.2.1 From 5ac968c2f7a20f0b7b9da54c0ec72057b36abfd7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:05:41 +0000 Subject: Remove unused var --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d0c984cb..58a28d32 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,7 +510,7 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). 
publish_delivered(false, #basic_message { guid = Guid }, - MsgProps = #message_properties { + #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of -- cgit v1.2.1 From 912fd5c0df7a52e99e5c8386c4f3d9894b324f46 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:21:43 +0000 Subject: renaming --- src/rabbit_alarm.erl | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 365a5ed2..9ce468f0 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -25,7 +25,7 @@ -export([remote_conserve_memory/2]). %% Internal use only --record(alarms, {alertees, high_watermarks}). +-record(alarms, {alertees, alarmed_nodes}). %%---------------------------------------------------------------------------- @@ -78,27 +78,27 @@ remote_conserve_memory(Pid, Conserve) -> %%---------------------------------------------------------------------------- init([]) -> - {ok, #alarms{alertees = dict:new(), - high_watermarks = sets:new()}}. + {ok, #alarms{alertees = dict:new(), + alarmed_nodes = sets:new()}}. handle_call({register, Pid, HighMemMFA}, State) -> - {ok, 0 < sets:size(State#alarms.high_watermarks), + {ok, 0 < sets:size(State#alarms.alarmed_nodes), internal_register(Pid, HighMemMFA, State)}; handle_call(_Request, State) -> {ok, not_understood, State}. handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:add_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, true), - {ok, State#alarms{high_watermarks = Highs1}}; + State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:add_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, true), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:del_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), - {ok, State#alarms{high_watermarks = Highs1}}; + State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:del_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node, up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. 
@@ -107,10 +107,10 @@ handle_event({node, up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node, down, Node}, State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:del_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), - {ok, State#alarms{high_watermarks = Highs1}}; +handle_event({node, down, Node}, State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:del_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> {ok, internal_register(Pid, HighMemMFA, State)}; @@ -173,7 +173,7 @@ alert(Alert, Alertees, NodeComparator) -> internal_register(Pid, {M, F, A} = HighMemMFA, State = #alarms{alertees = Alertees}) -> _MRef = erlang:monitor(process, Pid), - ok = case sets:is_element(node(), State#alarms.high_watermarks) of + ok = case sets:is_element(node(), State#alarms.alarmed_nodes) of true -> apply(M, F, A ++ [Pid, true]); false -> ok end, -- cgit v1.2.1 From a8253808e91b19dff6c7bb2b399a04f75005ee7f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:24:17 +0000 Subject: dafter renaming --- src/rabbit_alarm.erl | 14 ++++++++------ src/rabbit_node_monitor.erl | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 9ce468f0..82c921a2 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -18,7 +18,7 @@ -behaviour(gen_event). --export([start/0, stop/0, register/2, on_node/2]). +-export([start/0, stop/0, register/2, on_node_up/1, on_node_down/1]). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). @@ -35,7 +35,8 @@ -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). -spec(register/2 :: (pid(), mfa_tuple()) -> boolean()). --spec(on_node/2 :: ('up'|'down', node()) -> 'ok'). +-spec(on_node_up/1 :: (node()) -> 'ok'). +-spec(on_node_down/1 :: (node()) -> 'ok'). -endif. @@ -59,8 +60,9 @@ register(Pid, HighMemMFA) -> {register, Pid, HighMemMFA}, infinity). -on_node(Action, Node) -> - gen_event:notify(alarm_handler, {node, Action, Node}). +on_node_up(Node) -> gen_event:notify(alarm_handler, {node_up, Node}). + +on_node_down(Node) -> gen_event:notify(alarm_handler, {node_down, Node}). remote_conserve_memory(Pid, Conserve) -> RemoteNode = node(Pid), @@ -100,14 +102,14 @@ handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; -handle_event({node, up, Node}, State) -> +handle_event({node_up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. 
ok = gen_event:notify( {alarm_handler, Node}, {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node, down, Node}, State = #alarms{alarmed_nodes = AN}) -> +handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN}) -> AN1 = sets:del_element(Node, AN), ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 061f628d..1917c12c 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -69,7 +69,7 @@ handle_call(_Request, _From, State) -> handle_cast({rabbit_running_on, Node}, State) -> rabbit_log:info("node ~p up~n", [Node]), erlang:monitor(process, {rabbit, Node}), - ok = rabbit_alarm:on_node(up, Node), + ok = rabbit_alarm:on_node_up(Node), {noreply, State}; handle_cast(_Msg, State) -> {noreply, State}. @@ -99,4 +99,4 @@ code_change(_OldVsn, State, _Extra) -> handle_dead_rabbit(Node) -> ok = rabbit_networking:on_node_down(Node), ok = rabbit_amqqueue:on_node_down(Node), - ok = rabbit_alarm:on_node(down, Node). + ok = rabbit_alarm:on_node_down(Node). -- cgit v1.2.1 From dbaf1c2d62ecc348996e752c2b81ac684f3e00e0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:25:54 +0000 Subject: shrink code --- src/rabbit_alarm.erl | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 82c921a2..62c1cc74 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -64,18 +64,14 @@ on_node_up(Node) -> gen_event:notify(alarm_handler, {node_up, Node}). on_node_down(Node) -> gen_event:notify(alarm_handler, {node_down, Node}). -remote_conserve_memory(Pid, Conserve) -> - RemoteNode = node(Pid), - %% Can't use alarm_handler:{set,clear}_alarm because that doesn't - %% permit notifying a remote node. - case Conserve of - true -> gen_event:notify( - {alarm_handler, RemoteNode}, - {set_alarm, {{vm_memory_high_watermark, node()}, []}}); - false -> gen_event:notify( - {alarm_handler, RemoteNode}, - {clear_alarm, {vm_memory_high_watermark, node()}}) - end. +%% Can't use alarm_handler:{set,clear}_alarm because that doesn't +%% permit notifying a remote node. +remote_conserve_memory(Pid, true) -> + gen_event:notify({alarm_handler, node(Pid)}, + {set_alarm, {{vm_memory_high_watermark, node()}, []}}); +remote_conserve_memory(Pid, false) -> + gen_event:notify({alarm_handler, node(Pid)}, + {clear_alarm, {vm_memory_high_watermark, node()}}). %%---------------------------------------------------------------------------- -- cgit v1.2.1 From 63aa5b839ab9e30281c5a0cef9f45354e14374d9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:41:45 +0000 Subject: move code around and refactor a bit --- src/rabbit_alarm.erl | 56 ++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 62c1cc74..a4914c0b 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -87,15 +87,17 @@ handle_call(_Request, State) -> {ok, not_understood, State}. 
handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{alarmed_nodes = AN}) -> + State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:add_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, true), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, true), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{alarmed_nodes = AN}) -> + State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node_up, Node}, State) -> @@ -105,9 +107,10 @@ handle_event({node_up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN}) -> +handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> @@ -117,8 +120,8 @@ handle_event(_Event, State) -> {ok, State}. handle_info({'DOWN', _MRef, process, Pid, _Reason}, - State = #alarms{alertees = Alertess}) -> - {ok, State#alarms{alertees = dict:erase(Pid, Alertess)}}; + State = #alarms{alertees = Alertees}) -> + {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}}; handle_info(_Info, State) -> {ok, State}. @@ -131,26 +134,23 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- -maybe_alert(Before, After, Alertees, AlarmNode, Action) - when AlarmNode =:= node() -> - %% If we have changed our alarm state, always inform the remotes. - case {sets:is_element(AlarmNode, Before), sets:is_element(AlarmNode, After), - Action} of - {false, true, true} -> alert_remote(Action, Alertees); - {true, false, false} -> alert_remote(Action, Alertees); - _ -> ok - end, - maybe_alert_local(Before, After, Alertees, Action); -maybe_alert(Before, After, Alertees, _AlarmNode, Action) -> - maybe_alert_local(Before, After, Alertees, Action). - -maybe_alert_local(Before, After, Alertees, Action) -> - %% If the overall alarm state has changed, inform the locals. - case {sets:size(Before), sets:size(After), Action} of - {0, 1, true} -> alert_local(Action, Alertees); - {1, 0, false} -> alert_local(Action, Alertees); - _ -> ok - end. +maybe_alert(BeforeSize, AfterSize, Alertees, AlarmNode, Action) -> + ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, + AlarmNode =:= node(), Action), + ok = maybe_alert_local(BeforeSize, AfterSize, Alertees, Action). + +%% If we have changed our alarm state, always inform the remotes. +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, true) + when BeforeSize < AfterSize -> alert_remote(true, Alertees); +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, false) + when BeforeSize > AfterSize -> alert_remote(false, Alertees); +maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode, _Action) -> + ok. + +%% If the overall alarm state has changed, inform the locals. 
+maybe_alert_local(0, 1, Alertees, true ) -> alert_local(true, Alertees); +maybe_alert_local(1, 0, Alertees, false ) -> alert_local(false, Alertees); +maybe_alert_local(_, _, _Alertees, _Action) -> ok. alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). -- cgit v1.2.1 From 4a3a2daaed541572a5ae37a950f14964645305f1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:49:42 +0000 Subject: avoid tautology --- src/rabbit_alarm.erl | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index a4914c0b..508da5ee 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -90,14 +90,14 @@ handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:add_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, true), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node_up, Node}, State) -> @@ -110,7 +110,7 @@ handle_event({node_up, Node}, State) -> handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> @@ -134,23 +134,22 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- -maybe_alert(BeforeSize, AfterSize, Alertees, AlarmNode, Action) -> - ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, - AlarmNode =:= node(), Action), - ok = maybe_alert_local(BeforeSize, AfterSize, Alertees, Action). +maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> + ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), + ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). %% If we have changed our alarm state, always inform the remotes. -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, true) +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) when BeforeSize < AfterSize -> alert_remote(true, Alertees); -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, false) +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) when BeforeSize > AfterSize -> alert_remote(false, Alertees); -maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode, _Action) -> +maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode) -> ok. %% If the overall alarm state has changed, inform the locals. -maybe_alert_local(0, 1, Alertees, true ) -> alert_local(true, Alertees); -maybe_alert_local(1, 0, Alertees, false ) -> alert_local(false, Alertees); -maybe_alert_local(_, _, _Alertees, _Action) -> ok. +maybe_alert_local(0, 1, Alertees) -> alert_local(true, Alertees); +maybe_alert_local(1, 0, Alertees) -> alert_local(false, Alertees); +maybe_alert_local(_, _, _Alertees) -> ok. alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). 
-- cgit v1.2.1 From ffd695bed82709c57064fcaf46606b607e474140 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:11:07 +0000 Subject: simplify --- src/rabbit_alarm.erl | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 508da5ee..34cc4d3c 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -86,19 +86,11 @@ handle_call({register, Pid, HighMemMFA}, State) -> handle_call(_Request, State) -> {ok, not_understood, State}. -handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:add_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, State) -> + {ok, maybe_alert(fun sets:add_element/2, Node, State)}; -handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, State) -> + {ok, maybe_alert(fun sets:del_element/2, Node, State)}; handle_event({node_up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. @@ -107,11 +99,8 @@ handle_event({node_up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({node_down, Node}, State) -> + {ok, maybe_alert(fun sets:del_element/2, Node, State)}; handle_event({register, Pid, HighMemMFA}, State) -> {ok, internal_register(Pid, HighMemMFA, State)}; @@ -134,6 +123,12 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- +maybe_alert(SetFun, Node, State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> + AN1 = SetFun(Node, AN), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), + State#alarms{alarmed_nodes = AN1}. + maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). -- cgit v1.2.1 From 2570ed2e55fb40e2853652a5e94719ddb2a9e78e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:29:11 +0000 Subject: yet more simplification --- src/rabbit_alarm.erl | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 34cc4d3c..9f88d8da 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -126,31 +126,25 @@ code_change(_OldVsn, State, _Extra) -> maybe_alert(SetFun, Node, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = SetFun(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), + BeforeSz = sets:size(AN), + AfterSz = sets:size(AN1), + %% If we have changed our alarm state, inform the remotes. 
+ IsLocal = Node =:= node(), + if IsLocal andalso BeforeSz < AfterSz -> ok = alert_remote(true, Alertees); + IsLocal andalso BeforeSz > AfterSz -> ok = alert_remote(false, Alertees); + true -> ok + end, + %% If the overall alarm state has changed, inform the locals. + case {BeforeSz, AfterSz} of + {0, 1} -> ok = alert_local(true, Alertees); + {1, 0} -> ok = alert_local(false, Alertees); + {_, _} -> ok + end, State#alarms{alarmed_nodes = AN1}. -maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> - ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), - ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). +alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). -%% If we have changed our alarm state, always inform the remotes. -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) - when BeforeSize < AfterSize -> alert_remote(true, Alertees); -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) - when BeforeSize > AfterSize -> alert_remote(false, Alertees); -maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode) -> - ok. - -%% If the overall alarm state has changed, inform the locals. -maybe_alert_local(0, 1, Alertees) -> alert_local(true, Alertees); -maybe_alert_local(1, 0, Alertees) -> alert_local(false, Alertees); -maybe_alert_local(_, _, _Alertees) -> ok. - -alert_local(Alert, Alertees) -> - alert(Alert, Alertees, fun erlang:'=:='/2). - -alert_remote(Alert, Alertees) -> - alert(Alert, Alertees, fun erlang:'=/='/2). +alert_remote(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=/='/2). alert(Alert, Alertees, NodeComparator) -> Node = node(), -- cgit v1.2.1 From 976787bbbaf1ebbae5e7c620f8b8ae40f55afd71 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:34:11 +0000 Subject: cosmetic --- src/rabbit_alarm.erl | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 9f88d8da..d38ecb91 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -148,20 +148,19 @@ alert_remote(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=/='/2). alert(Alert, Alertees, NodeComparator) -> Node = node(), - dict:fold(fun (Pid, {M, F, A}, Acc) -> + dict:fold(fun (Pid, {M, F, A}, ok) -> case NodeComparator(Node, node(Pid)) of - true -> ok = erlang:apply(M, F, A ++ [Pid, Alert]), - Acc; - false -> Acc + true -> apply(M, F, A ++ [Pid, Alert]); + false -> ok end end, ok, Alertees). internal_register(Pid, {M, F, A} = HighMemMFA, State = #alarms{alertees = Alertees}) -> _MRef = erlang:monitor(process, Pid), - ok = case sets:is_element(node(), State#alarms.alarmed_nodes) of - true -> apply(M, F, A ++ [Pid, true]); - false -> ok - end, + case sets:is_element(node(), State#alarms.alarmed_nodes) of + true -> ok = apply(M, F, A ++ [Pid, true]); + false -> ok + end, NewAlertees = dict:store(Pid, HighMemMFA, Alertees), State#alarms{alertees = NewAlertees}. 
-- cgit v1.2.1 From c5b8dbd5f5526a815620f5d7385040b7fc91c4c3 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 4 Mar 2011 10:41:47 +0000 Subject: Reworked binding delete into single transaction again --- src/rabbit_exchange_type_topic.erl | 84 ++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 31 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 25cdcc31..08e16661 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -68,19 +68,56 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, _X, Bs) -> - ToDelete = - lists:foldl(fun(B = #binding{source = X, destination = D}, Acc) -> - [{FinalNode, _} | _] = binding_path(B), - [{X, FinalNode, D} | Acc] - end, [], Bs), + {ToDelete, Paths} = + lists:foldl( + fun(B = #binding{source = X, destination = D}, {Acc, PathAcc}) -> + Path = [{FinalNode, _} | _] = binding_path(B), + PathAcc1 = decrement_bindings(X, Path, maybe_add_path( + X, Path, PathAcc)), + {[{X, FinalNode, D} | Acc], PathAcc1} + end, {[], gb_trees:empty()}, Bs), + [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], + [trie_remove_edge(X, Parent, Node, W) || + {{X, [{Node, W}, {Parent, _} | _ ]}, {0, 0}} + <- gb_trees:to_list(Paths)], ok; -remove_bindings(false, _X, Bs) -> - [rabbit_misc:execute_mnesia_transaction( - fun() -> remove_path_if_empty(X, binding_path(B)) end) - || B = #binding{source = X} <- Bs], +remove_bindings(false, _X, _Bs) -> ok. +maybe_add_path(_X, [{root, none}], PathAcc) -> + PathAcc; +maybe_add_path(X, Path, PathAcc) -> + case gb_trees:is_defined({X, Path}, PathAcc) of + true -> PathAcc; + false -> gb_trees:insert({X, Path}, counts(X, Path), PathAcc) + end. + +decrement_bindings(X, Path, PathAcc) -> + with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, + X, Path, PathAcc). + +decrement_edges(X, Path, PathAcc) -> + with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, + X, Path, PathAcc). + +with_path_acc(_Fun, _X, [{root, none}], PathAcc) -> + PathAcc; +with_path_acc(Fun, X, Path, PathAcc) -> + NewVal = Fun(gb_trees:get({X, Path}, PathAcc)), + NewPathAcc = gb_trees:update({X, Path}, NewVal, PathAcc), + case NewVal of + {0, 0} -> + [_ | ParentPath] = Path, + decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); + _ -> + NewPathAcc + end. + +counts(X, [{FinalNode, _} | _]) -> + {trie_binding_count(X, FinalNode), trie_child_count(X, FinalNode)}. + binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -151,15 +188,6 @@ follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) -> error -> {error, Acc, Words} end. -remove_path_if_empty(_, [{root, none}]) -> - ok; -remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> - case trie_has_any_bindings(X, Node) orelse trie_has_any_children(X, Node) of - true -> ok; - false -> trie_remove_edge(X, Parent, Node, W), - remove_path_if_empty(X, RestPath) - end. - trie_child(X, Node, Word) -> case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, @@ -204,21 +232,24 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_has_any_children(X, Node) -> - has_any(rabbit_topic_trie_edge, +trie_child_count(X, Node) -> + count(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, node_id = Node, _ = '_'}, _ = '_'}). 
-trie_has_any_bindings(X, Node) -> - has_any(rabbit_topic_trie_binding, +trie_binding_count(X, Node) -> + count(rabbit_topic_trie_binding, #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X, node_id = Node, _ = '_'}, _ = '_'}). +count(Table, Match) -> + length(mnesia:match_object(Table, Match, read)). + trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, @@ -231,15 +262,6 @@ trie_remove_all_bindings(X) -> trie_binding = #trie_binding{exchange_name = X, _ = '_'}, _ = '_'}). -has_any(Table, MatchHead) -> - Select = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read), - select_while_no_result(Select) /= '$end_of_table'. - -select_while_no_result({[], Cont}) -> - select_while_no_result(mnesia:select(Cont)); -select_while_no_result(Other) -> - Other. - remove_all(Table, Pattern) -> lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end, mnesia:match_object(Table, Pattern, write)). -- cgit v1.2.1 From b6058d0b1bef5c5f9eddff225ff2accc70eea086 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 15:41:25 +0000 Subject: beginnings of decoupling bq from amqqueue - parameterise bq with callbacks --- include/rabbit_backing_queue_spec.hrl | 6 +- src/rabbit_amqqueue_process.erl | 18 +++- src/rabbit_backing_queue.erl | 2 +- src/rabbit_tests.erl | 16 ++-- src/rabbit_variable_queue.erl | 162 ++++++++++++++++++---------------- 5 files changed, 114 insertions(+), 90 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index accb2c0e..2e4d1b0a 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -25,11 +25,13 @@ -type(message_properties_transformer() :: fun ((rabbit_types:message_properties()) -> rabbit_types:message_properties())). +-type(async_callback() :: fun ((fun ((state()) -> state())) -> 'ok')). +-type(sync_callback() :: fun ((fun ((state()) -> state())) -> 'ok' | 'error')). -spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(init/3 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery()) -> - state()). +-spec(init/5 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery(), + async_callback(), sync_callback()) -> state()). -spec(terminate/1 :: (state()) -> state()). -spec(delete_and_terminate/1 :: (state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..cf2a3949 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -149,7 +149,7 @@ declare(Recover, From, ok = rabbit_memory_monitor:register( self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), - BQS = BQ:init(QName, IsDurable, Recover), + BQS = bq_init(BQ, QName, IsDurable, Recover), State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), @@ -159,6 +159,20 @@ declare(Recover, From, Q1 -> {stop, normal, {existing, Q1}, State} end. +bq_init(BQ, QName, IsDurable, Recover) -> + Self = self(), + BQ:init(QName, IsDurable, Recover, + fun (Fun) -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + Self, Fun) + end, + fun (Fun) -> + rabbit_misc:with_exit_handler( + fun () -> error end, + fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( + Self, Fun) end) + end). 
+ process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> lists:foldl(fun({Arg, Fun}, State1) -> case rabbit_misc:table_lookup(Arguments, Arg) of @@ -797,7 +811,7 @@ handle_call({init, Recover}, From, _ -> rabbit_log:warning( "Queue ~p exclusive owner went away~n", [QName]) end, - BQS = BQ:init(QName, IsDurable, Recover), + BQS = bq_init(BQ, QName, IsDurable, Recover), %% Rely on terminate to delete the queue. {stop, normal, State#q{backing_queue_state = BQS}} end; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 6a21e10f..a8e201ea 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -33,7 +33,7 @@ behaviour_info(callbacks) -> {stop, 0}, %% Initialise the backing queue and its state. - {init, 3}, + {init, 5}, %% Called on queue shutdown when queue isn't being deleted. {terminate, 1}, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..99bb1c4b 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2003,6 +2003,10 @@ test_queue_index() -> passed. +variable_queue_init(QName, IsDurable, Recover) -> + rabbit_variable_queue:init(QName, IsDurable, Recover, + fun nop/1, fun nop/1, fun nop/2, fun nop/1). + variable_queue_publish(IsPersistent, Count, VQ) -> lists:foldl( fun (_N, VQN) -> @@ -2033,8 +2037,7 @@ assert_props(List, PropVals) -> with_fresh_variable_queue(Fun) -> ok = empty_test_queue(), - VQ = rabbit_variable_queue:init(test_queue(), true, false, - fun nop/2, fun nop/1), + VQ = variable_queue_init(test_queue(), true, false), S0 = rabbit_variable_queue:status(VQ), assert_props(S0, [{q1, 0}, {q2, 0}, {delta, {delta, undefined, 0, undefined}}, @@ -2209,8 +2212,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true, - fun nop/2, fun nop/1), + VQ7 = variable_queue_init(test_queue(), true, true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), VQ9 = variable_queue_publish(false, 1, VQ8), @@ -2226,8 +2228,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:idle_timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true, - fun nop/2, fun nop/1), + VQ7 = variable_queue_init(test_queue(), true, true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. @@ -2258,8 +2259,7 @@ test_queue_recover() -> {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = rabbit_amqqueue:basic_get(Q1, self(), false), exit(QPid1, shutdown), - VQ1 = rabbit_variable_queue:init(QName, true, true, - fun nop/2, fun nop/1), + VQ1 = variable_queue_init(QName, true, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..7f702409 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,7 +16,7 @@ -module(rabbit_variable_queue). 
--export([init/3, terminate/1, delete_and_terminate/1, +-export([init/5, terminate/1, delete_and_terminate/1, purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, @@ -27,7 +27,7 @@ -export([start/1, stop/0]). %% exported for testing only --export([start_msg_store/2, stop_msg_store/0, init/5]). +-export([start_msg_store/2, stop_msg_store/0, init/7]). %%---------------------------------------------------------------------------- %% Definitions: @@ -238,6 +238,9 @@ durable, transient_threshold, + async_callback, + sync_callback, + len, persistent_count, @@ -332,11 +335,14 @@ {any(), binary()}}, on_sync :: sync(), durable :: boolean(), + transient_threshold :: non_neg_integer(), + + async_callback :: async_callback(), + sync_callback :: sync_callback(), len :: non_neg_integer(), persistent_count :: non_neg_integer(), - transient_threshold :: non_neg_integer(), target_ram_count :: non_neg_integer() | 'infinity', ram_msg_count :: non_neg_integer(), ram_msg_count_prev :: non_neg_integer(), @@ -397,25 +403,26 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). -init(QueueName, IsDurable, Recover) -> - Self = self(), - init(QueueName, IsDurable, Recover, +init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback) -> + init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback, fun (Guids, ActionTaken) -> - msgs_written_to_disk(Self, Guids, ActionTaken) + msgs_written_to_disk(AsyncCallback, Guids, ActionTaken) end, - fun (Guids) -> msg_indices_written_to_disk(Self, Guids) end). + fun (Guids) -> msg_indices_written_to_disk(AsyncCallback, Guids) end). -init(QueueName, IsDurable, false, MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(QueueName, IsDurable, false, AsyncCallback, SyncCallback, + MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), - init(IsDurable, IndexState, 0, [], + init(IsDurable, IndexState, 0, [], AsyncCallback, SyncCallback, case IsDurable of true -> msg_store_client_init(?PERSISTENT_MSG_STORE, - MsgOnDiskFun); + MsgOnDiskFun, AsyncCallback); false -> undefined end, - msg_store_client_init(?TRANSIENT_MSG_STORE, undefined)); + msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback)); -init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(QueueName, true, true, AsyncCallback, SyncCallback, + MsgOnDiskFun, MsgIdxOnDiskFun) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of @@ -425,9 +432,9 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> _ -> {rabbit_guid:guid(), rabbit_guid:guid(), []} end, PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef, - MsgOnDiskFun), + MsgOnDiskFun, AsyncCallback), TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE, TRef, - undefined), + undefined, AsyncCallback), {DeltaCount, IndexState} = rabbit_queue_index:recover( QueueName, Terms1, @@ -437,7 +444,7 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> end, MsgIdxOnDiskFun), init(true, IndexState, DeltaCount, Terms1, - PersistentClient, TransientClient). + PersistentClient, TransientClient, AsyncCallback, SyncCallback). 
terminate(State) -> State1 = #vqstate { persistent_count = PCount, @@ -512,9 +519,9 @@ publish(Msg, MsgProps, State) -> publish_delivered(false, #basic_message { guid = Guid }, #message_properties { needs_confirming = NeedsConfirming }, - State = #vqstate { len = 0 }) -> + State = #vqstate { async_callback = Callback, len = 0 }) -> case NeedsConfirming of - true -> blind_confirm(self(), gb_sets:singleton(Guid)); + true -> blind_confirm(Callback, gb_sets:singleton(Guid)); false -> ok end, {undefined, a(State)}; @@ -685,6 +692,8 @@ tx_rollback(Txn, State = #vqstate { durable = IsDurable, tx_commit(Txn, Fun, MsgPropsFun, State = #vqstate { durable = IsDurable, + async_callback = AsyncCallback, + sync_callback = SyncCallback, msg_store_clients = MSCState }) -> #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), @@ -696,7 +705,8 @@ tx_commit(Txn, Fun, MsgPropsFun, true -> ok = msg_store_sync( MSCState, true, PersistentGuids, msg_store_callback(PersistentGuids, Pubs, AckTags1, - Fun, MsgPropsFun)), + Fun, MsgPropsFun, + AsyncCallback, SyncCallback)), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, Fun, MsgPropsFun, State) @@ -929,13 +939,13 @@ with_immutable_msg_store_state(MSCState, IsPersistent, Fun) -> end), Res. -msg_store_client_init(MsgStore, MsgOnDiskFun) -> - msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun). +msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> + msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun, Callback). -msg_store_client_init(MsgStore, Ref, MsgOnDiskFun) -> +msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> rabbit_msg_store:client_init( MsgStore, Ref, MsgOnDiskFun, - msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE)). + msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE, Callback)). msg_store_write(MSCState, IsPersistent, Guid, Msg) -> with_immutable_msg_store_state( @@ -967,16 +977,13 @@ msg_store_close_fds(MSCState, IsPersistent) -> MSCState, IsPersistent, fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). -msg_store_close_fds_fun(IsPersistent) -> - Self = self(), - fun () -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - Self, - fun (State = #vqstate { msg_store_clients = MSCState }) -> - {ok, MSCState1} = - msg_store_close_fds(MSCState, IsPersistent), - {[], State #vqstate { msg_store_clients = MSCState1 }} - end) +msg_store_close_fds_fun(IsPersistent, Callback) -> + fun () -> Callback( + fun (State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = + msg_store_close_fds(MSCState, IsPersistent), + {[], State #vqstate { msg_store_clients = MSCState1 }} + end) end. 
maybe_write_delivered(false, _SeqId, IndexState) -> @@ -1062,7 +1069,7 @@ update_rate(Now, Then, Count, {OThen, OCount}) -> %%---------------------------------------------------------------------------- init(IsDurable, IndexState, DeltaCount, Terms, - PersistentClient, TransientClient) -> + PersistentClient, TransientClient, AsyncCallback, SyncCallback) -> {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), @@ -1088,6 +1095,9 @@ init(IsDurable, IndexState, DeltaCount, Terms, durable = IsDurable, transient_threshold = NextSeqId, + async_callback = AsyncCallback, + sync_callback = SyncCallback, + len = DeltaCount1, persistent_count = DeltaCount1, @@ -1114,23 +1124,24 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. -msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> - Self = self(), - F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( - Self, fun (StateN) -> {[], tx_commit_post_msg_store( - true, Pubs, AckTags, - Fun, MsgPropsFun, StateN)} - end) - end, - fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( - fun () -> remove_persistent_messages( - PersistentGuids) - end, F) +msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun, + AsyncCallback, SyncCallback) -> + fun () -> spawn(fun () -> case SyncCallback( + fun (StateN) -> + tx_commit_post_msg_store( + true, Pubs, AckTags, + Fun, MsgPropsFun, StateN) + end) of + ok -> ok; + error -> remove_persistent_messages( + PersistentGuids, AsyncCallback) + end end) end. -remove_persistent_messages(Guids) -> - PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, undefined), +remove_persistent_messages(Guids, AsyncCallback) -> + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, + undefined, AsyncCallback), ok = rabbit_msg_store:remove(Guids, PersistentClient), rabbit_msg_store:client_delete_and_terminate(PersistentClient). @@ -1442,35 +1453,32 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, msgs_confirmed(GuidSet, State) -> {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. -blind_confirm(QPid, GuidSet) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State) -> msgs_confirmed(GuidSet, State) end). - -msgs_written_to_disk(QPid, GuidSet, removed) -> - blind_confirm(QPid, GuidSet); -msgs_written_to_disk(QPid, GuidSet, written) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), - State #vqstate { - msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) - end). - -msg_indices_written_to_disk(QPid, GuidSet) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), - State #vqstate { - msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) - end). +blind_confirm(Callback, GuidSet) -> + Callback(fun (State) -> msgs_confirmed(GuidSet, State) end). 
+ +msgs_written_to_disk(Callback, GuidSet, removed) -> + blind_confirm(Callback, GuidSet); +msgs_written_to_disk(Callback, GuidSet, written) -> + Callback(fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) + end). + +msg_indices_written_to_disk(Callback, GuidSet) -> + Callback(fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) + end). %%---------------------------------------------------------------------------- %% Phase changes -- cgit v1.2.1 From 55494a8fe0850e22c57609e41f6c525a80064991 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 4 Mar 2011 17:16:32 +0000 Subject: compromise renaming --- src/rabbit_channel.erl | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index fe6522fe..77960e45 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,8 +298,10 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, State = #ch{consumer_monitors = ConsumerMonitors}) -> noreply( case dict:find(MRef, ConsumerMonitors) of - error -> handle_queue_down(QPid, Reason, State); - {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) + error -> + handle_publishing_queue_down(QPid, Reason, State); + {ok, ConsumerTag} -> + handle_consuming_queue_down(MRef, ConsumerTag, State) end). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -1103,7 +1105,7 @@ monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, State end. -handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> +handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); none -> [] @@ -1120,10 +1122,10 @@ handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> end)(MXs, State2), queue_blocked(QPid, State3). 
-handle_consumer_down(MRef, ConsumerTag, - State = #ch{consumer_mapping = ConsumerMapping, - consumer_monitors = ConsumerMonitors, - writer_pid = WriterPid}) -> +handle_consuming_queue_down(MRef, ConsumerTag, + State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + writer_pid = WriterPid}) -> ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping), ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, -- cgit v1.2.1 From b155306db41afb224a90bd20f142700c42a97efc Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:22:18 +0000 Subject: introduce separate type for msg ids and add some auxiliary types for fun params to a) make their purpose clearer, and b) work around emacs indentation bugs --- include/rabbit_msg_store_index.hrl | 8 ++++---- src/rabbit_amqqueue.erl | 4 ++-- src/rabbit_msg_file.erl | 12 +++++++----- src/rabbit_msg_store.erl | 22 +++++++++++----------- src/rabbit_queue_index.erl | 21 ++++++++++----------- src/rabbit_types.erl | 5 +++-- 6 files changed, 37 insertions(+), 35 deletions(-) diff --git a/include/rabbit_msg_store_index.hrl b/include/rabbit_msg_store_index.hrl index 289f8f60..2ae5b000 100644 --- a/include/rabbit_msg_store_index.hrl +++ b/include/rabbit_msg_store_index.hrl @@ -29,13 +29,13 @@ -spec(new/1 :: (dir()) -> index_state()). -spec(recover/1 :: (dir()) -> rabbit_types:ok_or_error2(index_state(), any())). -spec(lookup/2 :: - (rabbit_guid:guid(), index_state()) -> ('not_found' | keyvalue())). + (rabbit_types:msg_id(), index_state()) -> ('not_found' | keyvalue())). -spec(insert/2 :: (keyvalue(), index_state()) -> 'ok'). -spec(update/2 :: (keyvalue(), index_state()) -> 'ok'). --spec(update_fields/3 :: (rabbit_guid:guid(), ({fieldpos(), fieldvalue()} | - [{fieldpos(), fieldvalue()}]), +-spec(update_fields/3 :: (rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} | + [{fieldpos(), fieldvalue()}]), index_state()) -> 'ok'). --spec(delete/2 :: (rabbit_guid:guid(), index_state()) -> 'ok'). +-spec(delete/2 :: (rabbit_types:msg_id(), index_state()) -> 'ok'). -spec(delete_object/2 :: (keyvalue(), index_state()) -> 'ok'). -spec(delete_by_file/2 :: (fieldvalue(), index_state()) -> 'ok'). -spec(terminate/1 :: (index_state()) -> any()). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 46b78c39..bbeff1f7 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -141,9 +141,9 @@ fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). -spec(maybe_run_queue_via_backing_queue/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). + (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(maybe_run_queue_via_backing_queue_async/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). + (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 55e6ac47..71b4aa6f 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -39,15 +39,17 @@ -type(position() :: non_neg_integer()). -type(msg_size() :: non_neg_integer()). -type(file_size() :: non_neg_integer()). +-type(message_accumulator(A) :: + fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) -> + A)). 
--spec(append/3 :: (io_device(), rabbit_guid:guid(), msg()) -> +-spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) -> rabbit_types:ok_or_error2(msg_size(), any())). -spec(read/2 :: (io_device(), msg_size()) -> - rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, + rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()}, any())). --spec(scan/4 :: (io_device(), file_size(), - fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), - A) -> {'ok', A, position()}). +-spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) -> + {'ok', A, position()}). -endif. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 9e65e442..02811da7 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -132,30 +132,30 @@ file_summary_ets :: ets:tid(), dedup_cache_ets :: ets:tid(), cur_file_cache_ets :: ets:tid()}). --type(startup_fun_state() :: - {(fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A})), - A}). +-type(msg_ref_delta_gen(A) :: + fun ((A) -> 'finished' | + {rabbit_types:msg_id(), non_neg_integer(), A})). -type(maybe_guid_fun() :: 'undefined' | fun ((gb_set()) -> any())). -type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')). -type(deletion_thunk() :: fun (() -> boolean())). -spec(start_link/4 :: (atom(), file:filename(), [binary()] | 'undefined', - startup_fun_state()) -> rabbit_types:ok_pid_or_error()). + {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()). -spec(successfully_recovered_state/1 :: (server()) -> boolean()). -spec(client_init/4 :: (server(), client_ref(), maybe_guid_fun(), maybe_close_fds_fun()) -> client_msstate()). -spec(client_terminate/1 :: (client_msstate()) -> 'ok'). -spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok'). -spec(client_ref/1 :: (client_msstate()) -> client_ref()). --spec(write/3 :: (rabbit_guid:guid(), msg(), client_msstate()) -> 'ok'). --spec(read/2 :: (rabbit_guid:guid(), client_msstate()) -> +-spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). +-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) -> {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). --spec(contains/2 :: (rabbit_guid:guid(), client_msstate()) -> boolean()). --spec(remove/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). --spec(release/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). --spec(sync/3 :: ([rabbit_guid:guid()], fun (() -> any()), client_msstate()) -> - 'ok'). +-spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). +-spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). +-spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). +-spec(sync/3 :: + ([rabbit_types:msg_id()], fun (() -> any()), client_msstate()) -> 'ok'). -spec(sync/1 :: (server()) -> 'ok'). -spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok'). diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76b1136f..7b5aa120 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -187,21 +187,21 @@ dirty_count :: integer(), max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), - unsynced_guids :: [rabbit_guid:guid()] - }). --type(startup_fun_state() :: - {fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A}), - A}). + unsynced_guids :: [rabbit_types:msg_id()] + }). +-type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). 
+-type(walker(A) :: fun ((A) -> 'finished' | + {rabbit_types:msg_id(), non_neg_integer(), A})). -type(shutdown_terms() :: [any()]). -spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()). -spec(shutdown_terms/1 :: (rabbit_amqqueue:name()) -> shutdown_terms()). -spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(), - fun ((rabbit_guid:guid()) -> boolean()), on_sync_fun()) -> - {'undefined' | non_neg_integer(), qistate()}). + contains_predicate(), on_sync_fun()) -> + {'undefined' | non_neg_integer(), qistate()}). -spec(terminate/2 :: ([any()], qistate()) -> qistate()). -spec(delete_and_terminate/1 :: (qistate()) -> qistate()). --spec(publish/5 :: (rabbit_guid:guid(), seq_id(), +-spec(publish/5 :: (rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), boolean(), qistate()) -> qistate()). -spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()). @@ -209,14 +209,13 @@ -spec(sync/2 :: ([seq_id()], qistate()) -> qistate()). -spec(flush/1 :: (qistate()) -> qistate()). -spec(read/3 :: (seq_id(), seq_id(), qistate()) -> - {[{rabbit_guid:guid(), seq_id(), + {[{rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), boolean(), boolean()}], qistate()}). -spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). -spec(bounds/1 :: (qistate()) -> {non_neg_integer(), non_neg_integer(), qistate()}). --spec(recover/1 :: ([rabbit_amqqueue:name()]) -> - {[[any()]], startup_fun_state()}). +-spec(recover/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}). -spec(add_queue_ttl/0 :: () -> 'ok'). diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index ab2300c0..899291f2 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -21,7 +21,7 @@ -ifdef(use_specs). -export_type([txn/0, maybe/1, info/0, infos/0, info_key/0, info_keys/0, - message/0, basic_message/0, + message/0, msg_id/0, basic_message/0, delivery/0, content/0, decoded_content/0, undecoded_content/0, unencoded_content/0, encoded_content/0, message_properties/0, vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0, @@ -62,11 +62,12 @@ properties_bin :: binary(), payload_fragments_rev :: [binary()]}). -type(content() :: undecoded_content() | decoded_content()). +-type(msg_id() :: rabbit_guid:guid()). -type(basic_message() :: #basic_message{exchange_name :: rabbit_exchange:name(), routing_keys :: [rabbit_router:routing_key()], content :: content(), - guid :: rabbit_guid:guid(), + guid :: msg_id(), is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: -- cgit v1.2.1 From c0304ad94f0862f6cae9d33dac434144b17ea309 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:46:50 +0000 Subject: guid -> msg_id in msg_store and friends --- include/rabbit_msg_store.hrl | 3 +- src/rabbit_msg_file.erl | 31 ++-- src/rabbit_msg_store.erl | 341 +++++++++++++++++++------------------ src/rabbit_msg_store_ets_index.erl | 2 +- 4 files changed, 190 insertions(+), 187 deletions(-) diff --git a/include/rabbit_msg_store.hrl b/include/rabbit_msg_store.hrl index 9d704f65..e9150a97 100644 --- a/include/rabbit_msg_store.hrl +++ b/include/rabbit_msg_store.hrl @@ -22,5 +22,4 @@ -endif. --record(msg_location, - {guid, ref_count, file, offset, total_size}). +-record(msg_location, {msg_id, ref_count, file, offset, total_size}). diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 71b4aa6f..22ad3d05 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -27,8 +27,8 @@ -define(WRITE_OK_SIZE_BITS, 8). 
-define(WRITE_OK_MARKER, 255). -define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)). --define(GUID_SIZE_BYTES, 16). --define(GUID_SIZE_BITS, (8 * ?GUID_SIZE_BYTES)). +-define(MSG_ID_SIZE_BYTES, 16). +-define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)). -define(SCAN_BLOCK_SIZE, 4194304). %% 4MB %%---------------------------------------------------------------------------- @@ -55,14 +55,14 @@ %%---------------------------------------------------------------------------- -append(FileHdl, Guid, MsgBody) - when is_binary(Guid) andalso size(Guid) =:= ?GUID_SIZE_BYTES -> +append(FileHdl, MsgId, MsgBody) + when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES -> MsgBodyBin = term_to_binary(MsgBody), MsgBodyBinSize = size(MsgBodyBin), - Size = MsgBodyBinSize + ?GUID_SIZE_BYTES, + Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES, case file_handle_cache:append(FileHdl, <>) of ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT}; @@ -71,13 +71,13 @@ append(FileHdl, Guid, MsgBody) read(FileHdl, TotalSize) -> Size = TotalSize - ?FILE_PACKING_ADJUSTMENT, - BodyBinSize = Size - ?GUID_SIZE_BYTES, + BodyBinSize = Size - ?MSG_ID_SIZE_BYTES, case file_handle_cache:read(FileHdl, TotalSize) of {ok, <>} -> - {ok, {Guid, binary_to_term(MsgBodyBin)}}; + {ok, {MsgId, binary_to_term(MsgBodyBin)}}; KO -> KO end. @@ -102,21 +102,22 @@ scanner(<<>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; %% Nothing to do other than stop. -scanner(<>, Offset, Fun, Acc) -> TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, case WriteMarker of ?WRITE_OK_MARKER -> %% Here we take option 5 from %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the Guid as a number, and then convert it + %% which we read the MsgId as a number, and then convert it %% back to a binary in order to work around bugs in %% Erlang's GC. - <> = - <>, - <> = <>, + <> = + <>, + <> = + <>, scanner(Rest, Offset + TotalSize, Fun, - Fun({Guid, TotalSize, Offset, Msg}, Acc)); + Fun({MsgId, TotalSize, Offset, Msg}, Acc)); _ -> scanner(Rest, Offset + TotalSize, Fun, Acc) end; diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 02811da7..771835a1 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -74,7 +74,7 @@ %% to callbacks successfully_recovered, %% boolean: did we recover state? file_size_limit, %% how big are our files allowed to get? - cref_to_guids %% client ref to synced messages mapping + cref_to_msg_ids %% client ref to synced messages mapping }). -record(client_msstate, @@ -135,7 +135,7 @@ -type(msg_ref_delta_gen(A) :: fun ((A) -> 'finished' | {rabbit_types:msg_id(), non_neg_integer(), A})). --type(maybe_guid_fun() :: 'undefined' | fun ((gb_set()) -> any())). +-type(maybe_msg_id_fun() :: 'undefined' | fun ((gb_set()) -> any())). -type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')). -type(deletion_thunk() :: fun (() -> boolean())). @@ -143,7 +143,7 @@ (atom(), file:filename(), [binary()] | 'undefined', {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()). -spec(successfully_recovered_state/1 :: (server()) -> boolean()). --spec(client_init/4 :: (server(), client_ref(), maybe_guid_fun(), +-spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(), maybe_close_fds_fun()) -> client_msstate()). -spec(client_terminate/1 :: (client_msstate()) -> 'ok'). -spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok'). 
@@ -177,8 +177,8 @@ %% The components: %% -%% Index: this is a mapping from Guid to #msg_location{}: -%% {Guid, RefCount, File, Offset, TotalSize} +%% Index: this is a mapping from MsgId to #msg_location{}: +%% {MsgId, RefCount, File, Offset, TotalSize} %% By default, it's in ets, but it's also pluggable. %% FileSummary: this is an ets table which maps File to #file_summary{}: %% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers} @@ -279,7 +279,7 @@ %% alternating full files and files with only one tiny message in %% them). %% -%% Messages are reference-counted. When a message with the same guid +%% Messages are reference-counted. When a message with the same msg id %% is written several times we only store it once, and only remove it %% from the store when it has been removed the same number of times. %% @@ -422,29 +422,29 @@ client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) -> client_ref(#client_msstate { client_ref = Ref }) -> Ref. -write(Guid, Msg, +write(MsgId, Msg, CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts, client_ref = CRef }) -> - ok = update_msg_cache(CurFileCacheEts, Guid, Msg), - ok = server_cast(CState, {write, CRef, Guid}). + ok = update_msg_cache(CurFileCacheEts, MsgId, Msg), + ok = server_cast(CState, {write, CRef, MsgId}). -read(Guid, +read(MsgId, CState = #client_msstate { dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }) -> %% 1. Check the dedup cache - case fetch_and_increment_cache(DedupCacheEts, Guid) of + case fetch_and_increment_cache(DedupCacheEts, MsgId) of not_found -> %% 2. Check the cur file cache - case ets:lookup(CurFileCacheEts, Guid) of + case ets:lookup(CurFileCacheEts, MsgId) of [] -> Defer = fun() -> - {server_call(CState, {read, Guid}), CState} + {server_call(CState, {read, MsgId}), CState} end, - case index_lookup_positive_ref_count(Guid, CState) of + case index_lookup_positive_ref_count(MsgId, CState) of not_found -> Defer(); MsgLocation -> client_read1(MsgLocation, Defer, CState) end; - [{Guid, Msg, _CacheRefCount}] -> + [{MsgId, Msg, _CacheRefCount}] -> %% Although we've found it, we don't know the %% refcount, so can't insert into dedup cache {{ok, Msg}, CState} @@ -453,13 +453,13 @@ read(Guid, {{ok, Msg}, CState} end. -contains(Guid, CState) -> server_call(CState, {contains, Guid}). +contains(MsgId, CState) -> server_call(CState, {contains, MsgId}). remove([], _CState) -> ok; -remove(Guids, CState = #client_msstate { client_ref = CRef }) -> - server_cast(CState, {remove, CRef, Guids}). +remove(MsgIds, CState = #client_msstate { client_ref = CRef }) -> + server_cast(CState, {remove, CRef, MsgIds}). release([], _CState) -> ok; -release(Guids, CState) -> server_cast(CState, {release, Guids}). -sync(Guids, K, CState) -> server_cast(CState, {sync, Guids, K}). +release(MsgIds, CState) -> server_cast(CState, {release, MsgIds}). +sync(MsgIds, K, CState) -> server_cast(CState, {sync, MsgIds, K}). sync(Server) -> gen_server2:cast(Server, sync). @@ -477,11 +477,11 @@ server_call(#client_msstate { server = Server }, Msg) -> server_cast(#client_msstate { server = Server }, Msg) -> gen_server2:cast(Server, Msg). -client_read1(#msg_location { guid = Guid, file = File } = MsgLocation, Defer, +client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer, CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> case ets:lookup(FileSummaryEts, File) of [] -> %% File has been GC'd and no longer exists. Go around again. 
- read(Guid, CState); + read(MsgId, CState); [#file_summary { locked = Locked, right = Right }] -> client_read2(Locked, Right, MsgLocation, Defer, CState) end. @@ -503,7 +503,7 @@ client_read2(true, _Right, _MsgLocation, Defer, _CState) -> %% the safest and simplest thing to do. Defer(); client_read2(false, _Right, - MsgLocation = #msg_location { guid = Guid, file = File }, + MsgLocation = #msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> %% It's entirely possible that everything we're doing from here on @@ -512,9 +512,9 @@ client_read2(false, _Right, safe_ets_update_counter( FileSummaryEts, File, {#file_summary.readers, +1}, fun (_) -> client_read3(MsgLocation, Defer, CState) end, - fun () -> read(Guid, CState) end). + fun () -> read(MsgId, CState) end). -client_read3(#msg_location { guid = Guid, file = File }, Defer, +client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts, @@ -539,7 +539,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% too). case ets:lookup(FileSummaryEts, File) of [] -> %% GC has deleted our file, just go round again. - read(Guid, CState); + read(MsgId, CState); [#file_summary { locked = true }] -> %% If we get a badarg here, then the GC has finished and %% deleted our file. Try going around again. Otherwise, @@ -550,7 +550,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% unlocks the dest) try Release(), Defer() - catch error:badarg -> read(Guid, CState) + catch error:badarg -> read(MsgId, CState) end; [#file_summary { locked = false }] -> %% Ok, we're definitely safe to continue - a GC involving @@ -563,7 +563,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% us doing the lookup and the +1 on the readers. (Same as %% badarg scenario above, but we don't have a missing file %% - we just have the /wrong/ file). - case index_lookup(Guid, CState) of + case index_lookup(MsgId, CState) of #msg_location { file = File } = MsgLocation -> %% Still the same file. {ok, CState1} = close_all_indicated(CState), @@ -589,9 +589,9 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, end end. -clear_client(CRef, State = #msstate { cref_to_guids = CTG, +clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM, dying_clients = DyingClients }) -> - State #msstate { cref_to_guids = dict:erase(CRef, CTG), + State #msstate { cref_to_msg_ids = dict:erase(CRef, CTM), dying_clients = sets:del_element(CRef, DyingClients) }. @@ -666,7 +666,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> clients = Clients, successfully_recovered = CleanShutdown, file_size_limit = FileSizeLimit, - cref_to_guids = dict:new() + cref_to_msg_ids = dict:new() }, %% If we didn't recover the msg location index then we need to @@ -698,7 +698,7 @@ prioritise_call(Msg, _From, _State) -> case Msg of successfully_recovered_state -> 7; {new_client_state, _Ref, _MODC, _CloseFDsFun} -> 7; - {read, _Guid} -> 2; + {read, _MsgId} -> 2; _ -> 0 end. 
@@ -733,12 +733,12 @@ handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, handle_call({client_terminate, CRef}, _From, State) -> reply(ok, clear_client(CRef, State)); -handle_call({read, Guid}, From, State) -> - State1 = read_message(Guid, From, State), +handle_call({read, MsgId}, From, State) -> + State1 = read_message(MsgId, From, State), noreply(State1); -handle_call({contains, Guid}, From, State) -> - State1 = contains_message(Guid, From, State), +handle_call({contains, MsgId}, From, State) -> + State1 = contains_message(MsgId, From, State), noreply(State1). handle_cast({client_dying, CRef}, @@ -751,53 +751,53 @@ handle_cast({client_delete, CRef}, State = #msstate { clients = Clients }) -> State1 = State #msstate { clients = dict:erase(CRef, Clients) }, noreply(remove_message(CRef, CRef, clear_client(CRef, State1))); -handle_cast({write, CRef, Guid}, +handle_cast({write, CRef, MsgId}, State = #msstate { cur_file_cache_ets = CurFileCacheEts }) -> - true = 0 =< ets:update_counter(CurFileCacheEts, Guid, {3, -1}), - [{Guid, Msg, _CacheRefCount}] = ets:lookup(CurFileCacheEts, Guid), + true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}), + [{MsgId, Msg, _CacheRefCount}] = ets:lookup(CurFileCacheEts, MsgId), noreply( - case write_action(should_mask_action(CRef, Guid, State), Guid, State) of + case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of {write, State1} -> - write_message(CRef, Guid, Msg, State1); + write_message(CRef, MsgId, Msg, State1); {ignore, CurFile, State1 = #msstate { current_file = CurFile }} -> State1; {ignore, _File, State1} -> - true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}), State1; {confirm, CurFile, State1 = #msstate { current_file = CurFile }}-> - record_pending_confirm(CRef, Guid, State1); + record_pending_confirm(CRef, MsgId, State1); {confirm, _File, State1} -> - true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}), update_pending_confirms( - fun (MsgOnDiskFun, CTG) -> - MsgOnDiskFun(gb_sets:singleton(Guid), written), - CTG + fun (MsgOnDiskFun, CTM) -> + MsgOnDiskFun(gb_sets:singleton(MsgId), written), + CTM end, CRef, State1) end); -handle_cast({remove, CRef, Guids}, State) -> +handle_cast({remove, CRef, MsgIds}, State) -> State1 = lists:foldl( - fun (Guid, State2) -> remove_message(Guid, CRef, State2) end, - State, Guids), - noreply(maybe_compact( - client_confirm(CRef, gb_sets:from_list(Guids), removed, State1))); + fun (MsgId, State2) -> remove_message(MsgId, CRef, State2) end, + State, MsgIds), + noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(MsgIds), + removed, State1))); -handle_cast({release, Guids}, State = +handle_cast({release, MsgIds}, State = #msstate { dedup_cache_ets = DedupCacheEts }) -> lists:foreach( - fun (Guid) -> decrement_cache(DedupCacheEts, Guid) end, Guids), + fun (MsgId) -> decrement_cache(DedupCacheEts, MsgId) end, MsgIds), noreply(State); -handle_cast({sync, Guids, K}, +handle_cast({sync, MsgIds, K}, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, on_sync = Syncs }) -> {ok, SyncOffset} = file_handle_cache:last_sync_offset(CurHdl), - case lists:any(fun (Guid) -> + case lists:any(fun (MsgId) -> #msg_location { file = File, offset = Offset } = - index_lookup(Guid, State), + index_lookup(MsgId, State), File =:= CurFile andalso Offset >= SyncOffset - end, Guids) of + end, MsgIds) of false -> K(), noreply(State); 
true -> noreply(State #msstate { on_sync = [K | Syncs] }) @@ -879,16 +879,16 @@ reply(Reply, State) -> {State1, Timeout} = next_state(State), {reply, Reply, State1, Timeout}. -next_state(State = #msstate { sync_timer_ref = undefined, - on_sync = Syncs, - cref_to_guids = CTG }) -> - case {Syncs, dict:size(CTG)} of +next_state(State = #msstate { sync_timer_ref = undefined, + on_sync = Syncs, + cref_to_msg_ids = CTM }) -> + case {Syncs, dict:size(CTM)} of {[], 0} -> {State, hibernate}; _ -> {start_sync_timer(State), 0} end; -next_state(State = #msstate { on_sync = Syncs, - cref_to_guids = CTG }) -> - case {Syncs, dict:size(CTG)} of +next_state(State = #msstate { on_sync = Syncs, + cref_to_msg_ids = CTM }) -> + case {Syncs, dict:size(CTM)} of {[], 0} -> {stop_sync_timer(State), hibernate}; _ -> {State, 0} end. @@ -905,66 +905,66 @@ stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> internal_sync(State = #msstate { current_file_handle = CurHdl, on_sync = Syncs, - cref_to_guids = CTG }) -> + cref_to_msg_ids = CTM }) -> State1 = stop_sync_timer(State), - CGs = dict:fold(fun (CRef, Guids, NS) -> - case gb_sets:is_empty(Guids) of + CGs = dict:fold(fun (CRef, MsgIds, NS) -> + case gb_sets:is_empty(MsgIds) of true -> NS; - false -> [{CRef, Guids} | NS] + false -> [{CRef, MsgIds} | NS] end - end, [], CTG), + end, [], CTM), case {Syncs, CGs} of {[], []} -> ok; _ -> file_handle_cache:sync(CurHdl) end, [K() || K <- lists:reverse(Syncs)], - [client_confirm(CRef, Guids, written, State1) || {CRef, Guids} <- CGs], - State1 #msstate { cref_to_guids = dict:new(), on_sync = [] }. + [client_confirm(CRef, MsgIds, written, State1) || {CRef, MsgIds} <- CGs], + State1 #msstate { cref_to_msg_ids = dict:new(), on_sync = [] }. -write_action({true, not_found}, _Guid, State) -> +write_action({true, not_found}, _MsgId, State) -> {ignore, undefined, State}; -write_action({true, #msg_location { file = File }}, _Guid, State) -> +write_action({true, #msg_location { file = File }}, _MsgId, State) -> {ignore, File, State}; -write_action({false, not_found}, _Guid, State) -> +write_action({false, not_found}, _MsgId, State) -> {write, State}; write_action({Mask, #msg_location { ref_count = 0, file = File, total_size = TotalSize }}, - Guid, State = #msstate { file_summary_ets = FileSummaryEts }) -> + MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) -> case {Mask, ets:lookup(FileSummaryEts, File)} of {false, [#file_summary { locked = true }]} -> - ok = index_delete(Guid, State), + ok = index_delete(MsgId, State), {write, State}; {false_if_increment, [#file_summary { locked = true }]} -> - %% The msg for Guid is older than the client death + %% The msg for MsgId is older than the client death %% message, but as it is being GC'd currently we'll have %% to write a new copy, which will then be younger, so %% ignore this write. {ignore, File, State}; {_Mask, [#file_summary {}]} -> - ok = index_update_ref_count(Guid, 1, State), + ok = index_update_ref_count(MsgId, 1, State), State1 = adjust_valid_total_size(File, TotalSize, State), {confirm, File, State1} end; write_action({_Mask, #msg_location { ref_count = RefCount, file = File }}, - Guid, State) -> - ok = index_update_ref_count(Guid, RefCount + 1, State), + MsgId, State) -> + ok = index_update_ref_count(MsgId, RefCount + 1, State), %% We already know about it, just update counter. Only update %% field otherwise bad interaction with concurrent GC {confirm, File, State}. 
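%% Illustrative sketch (not part of the patch): write_action above reduces to
%% reference counting -- an unknown msg id gets written, a known one only has
%% its ref count bumped. A toy version over an ets table of {MsgId, RefCount}
%% pairs; the table layout is an assumption, not the real #msg_location{}.
-module(write_refcount_sketch).
-export([record_write/2]).

record_write(MsgId, IndexEts) ->
    case ets:lookup(IndexEts, MsgId) of
        [] ->
            true = ets:insert(IndexEts, {MsgId, 1}),
            write;                                %% first copy: write to disk
        [{MsgId, RefCount}] ->
            true = ets:update_element(IndexEts, MsgId, {2, RefCount + 1}),
            confirm                               %% duplicate: just bump count
    end.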
-write_message(CRef, Guid, Msg, State) -> - write_message(Guid, Msg, record_pending_confirm(CRef, Guid, State)). +write_message(CRef, MsgId, Msg, State) -> + write_message(MsgId, Msg, record_pending_confirm(CRef, MsgId, State)). -write_message(Guid, Msg, +write_message(MsgId, Msg, State = #msstate { current_file_handle = CurHdl, current_file = CurFile, sum_valid_data = SumValid, sum_file_size = SumFileSize, file_summary_ets = FileSummaryEts }) -> {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl), - {ok, TotalSize} = rabbit_msg_file:append(CurHdl, Guid, Msg), + {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg), ok = index_insert( - #msg_location { guid = Guid, ref_count = 1, file = CurFile, + #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile, offset = CurOffset, total_size = TotalSize }, State), [#file_summary { right = undefined, locked = false }] = ets:lookup(FileSummaryEts, CurFile), @@ -976,21 +976,21 @@ write_message(Guid, Msg, sum_valid_data = SumValid + TotalSize, sum_file_size = SumFileSize + TotalSize }). -read_message(Guid, From, +read_message(MsgId, From, State = #msstate { dedup_cache_ets = DedupCacheEts }) -> - case index_lookup_positive_ref_count(Guid, State) of + case index_lookup_positive_ref_count(MsgId, State) of not_found -> gen_server2:reply(From, not_found), State; MsgLocation -> - case fetch_and_increment_cache(DedupCacheEts, Guid) of + case fetch_and_increment_cache(DedupCacheEts, MsgId) of not_found -> read_message1(From, MsgLocation, State); Msg -> gen_server2:reply(From, {ok, Msg}), State end end. -read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, +read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, file = File, offset = Offset } = MsgLoc, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, @@ -1000,7 +1000,7 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, case File =:= CurFile of true -> {Msg, State1} = %% can return [] if msg in file existed on startup - case ets:lookup(CurFileCacheEts, Guid) of + case ets:lookup(CurFileCacheEts, MsgId) of [] -> {ok, RawOffSet} = file_handle_cache:current_raw_offset(CurHdl), @@ -1009,9 +1009,9 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, false -> ok end, read_from_disk(MsgLoc, State, DedupCacheEts); - [{Guid, Msg1, _CacheRefCount}] -> + [{MsgId, Msg1, _CacheRefCount}] -> ok = maybe_insert_into_cache( - DedupCacheEts, RefCount, Guid, Msg1), + DedupCacheEts, RefCount, MsgId, Msg1), {Msg1, State} end, gen_server2:reply(From, {ok, Msg}), @@ -1019,7 +1019,7 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, false -> [#file_summary { locked = Locked }] = ets:lookup(FileSummaryEts, File), case Locked of - true -> add_to_pending_gc_completion({read, Guid, From}, + true -> add_to_pending_gc_completion({read, MsgId, From}, File, State); false -> {Msg, State1} = read_from_disk(MsgLoc, State, DedupCacheEts), @@ -1028,47 +1028,47 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, end end. 
-read_from_disk(#msg_location { guid = Guid, ref_count = RefCount, +read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, file = File, offset = Offset, total_size = TotalSize }, State, DedupCacheEts) -> {Hdl, State1} = get_read_handle(File, State), {ok, Offset} = file_handle_cache:position(Hdl, Offset), - {ok, {Guid, Msg}} = + {ok, {MsgId, Msg}} = case rabbit_msg_file:read(Hdl, TotalSize) of - {ok, {Guid, _}} = Obj -> + {ok, {MsgId, _}} = Obj -> Obj; Rest -> {error, {misread, [{old_state, State}, {file_num, File}, {offset, Offset}, - {guid, Guid}, + {msg_id, MsgId}, {read, Rest}, {proc_dict, get()} ]}} end, - ok = maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg), + ok = maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg), {Msg, State1}. -contains_message(Guid, From, +contains_message(MsgId, From, State = #msstate { pending_gc_completion = Pending }) -> - case index_lookup_positive_ref_count(Guid, State) of + case index_lookup_positive_ref_count(MsgId, State) of not_found -> gen_server2:reply(From, false), State; #msg_location { file = File } -> case orddict:is_key(File, Pending) of true -> add_to_pending_gc_completion( - {contains, Guid, From}, File, State); + {contains, MsgId, From}, File, State); false -> gen_server2:reply(From, true), State end end. -remove_message(Guid, CRef, +remove_message(MsgId, CRef, State = #msstate { file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts }) -> - case should_mask_action(CRef, Guid, State) of + case should_mask_action(CRef, MsgId, State) of {true, _Location} -> State; {false_if_increment, #msg_location { ref_count = 0 }} -> @@ -1081,24 +1081,25 @@ remove_message(Guid, CRef, total_size = TotalSize }} when RefCount > 0 -> %% only update field, otherwise bad interaction with %% concurrent GC - Dec = - fun () -> index_update_ref_count(Guid, RefCount - 1, State) end, + Dec = fun () -> + index_update_ref_count(MsgId, RefCount - 1, State) + end, case RefCount of %% don't remove from CUR_FILE_CACHE_ETS_NAME here %% because there may be further writes in the mailbox %% for the same msg. - 1 -> ok = remove_cache_entry(DedupCacheEts, Guid), + 1 -> ok = remove_cache_entry(DedupCacheEts, MsgId), case ets:lookup(FileSummaryEts, File) of [#file_summary { locked = true }] -> add_to_pending_gc_completion( - {remove, Guid, CRef}, File, State); + {remove, MsgId, CRef}, File, State); [#file_summary {}] -> ok = Dec(), delete_file_if_empty( File, adjust_valid_total_size(File, -TotalSize, State)) end; - _ -> ok = decrement_cache(DedupCacheEts, Guid), + _ -> ok = decrement_cache(DedupCacheEts, MsgId), ok = Dec(), State end @@ -1119,12 +1120,12 @@ run_pending(Files, State) -> lists:reverse(orddict:fetch(File, Pending))) end, State, Files). -run_pending_action({read, Guid, From}, State) -> - read_message(Guid, From, State); -run_pending_action({contains, Guid, From}, State) -> - contains_message(Guid, From, State); -run_pending_action({remove, Guid, CRef}, State) -> - remove_message(Guid, CRef, State). +run_pending_action({read, MsgId, From}, State) -> + read_message(MsgId, From, State); +run_pending_action({contains, MsgId, From}, State) -> + contains_message(MsgId, From, State); +run_pending_action({remove, MsgId, CRef}, State) -> + remove_message(MsgId, CRef, State). safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) -> try @@ -1146,44 +1147,46 @@ orddict_store(Key, Val, Dict) -> false = orddict:is_key(Key, Dict), orddict:store(Key, Val, Dict). 
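%% Illustrative sketch (not part of the patch): safe_ets_update_counter above
%% exists because a row can be deleted by a concurrent GC between lookup and
%% update, turning ets:update_counter into a badarg. A stripped-down version
%% of that guard, with an invented fallback argument:
-module(safe_counter_sketch).
-export([bump/3]).

bump(Tab, Key, MissingFun) ->
    try
        ets:update_counter(Tab, Key, {2, +1})
    catch error:badarg ->
        %% the row vanished underneath us; let the caller decide what to do
        MissingFun()
    end.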
-update_pending_confirms(Fun, CRef, State = #msstate { clients = Clients, - cref_to_guids = CTG }) -> +update_pending_confirms(Fun, CRef, + State = #msstate { clients = Clients, + cref_to_msg_ids = CTM }) -> case dict:fetch(CRef, Clients) of {undefined, _CloseFDsFun} -> State; - {MsgOnDiskFun, _CloseFDsFun} -> CTG1 = Fun(MsgOnDiskFun, CTG), - State #msstate { cref_to_guids = CTG1 } + {MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM), + State #msstate { + cref_to_msg_ids = CTM1 } end. -record_pending_confirm(CRef, Guid, State) -> +record_pending_confirm(CRef, MsgId, State) -> update_pending_confirms( - fun (_MsgOnDiskFun, CTG) -> - dict:update(CRef, fun (Guids) -> gb_sets:add(Guid, Guids) end, - gb_sets:singleton(Guid), CTG) + fun (_MsgOnDiskFun, CTM) -> + dict:update(CRef, fun (MsgIds) -> gb_sets:add(MsgId, MsgIds) end, + gb_sets:singleton(MsgId), CTM) end, CRef, State). -client_confirm(CRef, Guids, ActionTaken, State) -> +client_confirm(CRef, MsgIds, ActionTaken, State) -> update_pending_confirms( - fun (MsgOnDiskFun, CTG) -> - MsgOnDiskFun(Guids, ActionTaken), - case dict:find(CRef, CTG) of - {ok, Gs} -> Guids1 = gb_sets:difference(Gs, Guids), - case gb_sets:is_empty(Guids1) of - true -> dict:erase(CRef, CTG); - false -> dict:store(CRef, Guids1, CTG) + fun (MsgOnDiskFun, CTM) -> + MsgOnDiskFun(MsgIds, ActionTaken), + case dict:find(CRef, CTM) of + {ok, Gs} -> MsgIds1 = gb_sets:difference(Gs, MsgIds), + case gb_sets:is_empty(MsgIds1) of + true -> dict:erase(CRef, CTM); + false -> dict:store(CRef, MsgIds1, CTM) end; - error -> CTG + error -> CTM end end, CRef, State). -%% Detect whether the Guid is older or younger than the client's death +%% Detect whether the MsgId is older or younger than the client's death %% msg (if there is one). If the msg is older than the client death %% msg, and it has a 0 ref_count we must only alter the ref_count, not %% rewrite the msg - rewriting it would make it younger than the death %% msg and thus should be ignored. Note that this (correctly) returns %% false when testing to remove the death msg itself. -should_mask_action(CRef, Guid, +should_mask_action(CRef, MsgId, State = #msstate { dying_clients = DyingClients }) -> - case {sets:is_element(CRef, DyingClients), index_lookup(Guid, State)} of + case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of {false, Location} -> {false, Location}; {true, not_found} -> @@ -1320,43 +1323,43 @@ list_sorted_file_names(Dir, Ext) -> %% message cache helper functions %%---------------------------------------------------------------------------- -maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg) +maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg) when RefCount > 1 -> - update_msg_cache(DedupCacheEts, Guid, Msg); -maybe_insert_into_cache(_DedupCacheEts, _RefCount, _Guid, _Msg) -> + update_msg_cache(DedupCacheEts, MsgId, Msg); +maybe_insert_into_cache(_DedupCacheEts, _RefCount, _MsgId, _Msg) -> ok. -update_msg_cache(CacheEts, Guid, Msg) -> - case ets:insert_new(CacheEts, {Guid, Msg, 1}) of +update_msg_cache(CacheEts, MsgId, Msg) -> + case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of true -> ok; false -> safe_ets_update_counter_ok( - CacheEts, Guid, {3, +1}, - fun () -> update_msg_cache(CacheEts, Guid, Msg) end) + CacheEts, MsgId, {3, +1}, + fun () -> update_msg_cache(CacheEts, MsgId, Msg) end) end. -remove_cache_entry(DedupCacheEts, Guid) -> - true = ets:delete(DedupCacheEts, Guid), +remove_cache_entry(DedupCacheEts, MsgId) -> + true = ets:delete(DedupCacheEts, MsgId), ok. 
-fetch_and_increment_cache(DedupCacheEts, Guid) -> - case ets:lookup(DedupCacheEts, Guid) of +fetch_and_increment_cache(DedupCacheEts, MsgId) -> + case ets:lookup(DedupCacheEts, MsgId) of [] -> not_found; - [{_Guid, Msg, _RefCount}] -> + [{_MsgId, Msg, _RefCount}] -> safe_ets_update_counter_ok( - DedupCacheEts, Guid, {3, +1}, + DedupCacheEts, MsgId, {3, +1}, %% someone has deleted us in the meantime, insert us - fun () -> ok = update_msg_cache(DedupCacheEts, Guid, Msg) end), + fun () -> ok = update_msg_cache(DedupCacheEts, MsgId, Msg) end), Msg end. -decrement_cache(DedupCacheEts, Guid) -> +decrement_cache(DedupCacheEts, MsgId) -> true = safe_ets_update_counter( - DedupCacheEts, Guid, {3, -1}, - fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, Guid); + DedupCacheEts, MsgId, {3, -1}, + fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, MsgId); (_N) -> true end, - %% Guid is not in there because although it's been + %% MsgId is not in there because although it's been %% delivered, it's never actually been read (think: %% persistent message held in RAM) fun () -> true end), @@ -1473,19 +1476,19 @@ count_msg_refs(Gen, Seed, State) -> case Gen(Seed) of finished -> ok; - {_Guid, 0, Next} -> + {_MsgId, 0, Next} -> count_msg_refs(Gen, Next, State); - {Guid, Delta, Next} -> - ok = case index_lookup(Guid, State) of + {MsgId, Delta, Next} -> + ok = case index_lookup(MsgId, State) of not_found -> - index_insert(#msg_location { guid = Guid, + index_insert(#msg_location { msg_id = MsgId, file = undefined, ref_count = Delta }, State); #msg_location { ref_count = RefCount } = StoreEntry -> NewRefCount = RefCount + Delta, case NewRefCount of - 0 -> index_delete(Guid, State); + 0 -> index_delete(MsgId, State); _ -> index_update(StoreEntry #msg_location { ref_count = NewRefCount }, State) @@ -1539,8 +1542,8 @@ scan_file_for_valid_messages(Dir, FileName) -> {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} end. -scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> - [{Guid, TotalSize, Offset} | Acc]. +scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) -> + [{MsgId, TotalSize, Offset} | Acc]. %% Takes the list in *ascending* order (i.e. eldest message %% first). This is the opposite of what scan_file_for_valid_messages @@ -1619,8 +1622,8 @@ build_index_worker(Gatherer, State = #msstate { dir = Dir }, scan_file_for_valid_messages(Dir, filenum_to_name(File)), {ValidMessages, ValidTotalSize} = lists:foldl( - fun (Obj = {Guid, TotalSize, Offset}, {VMAcc, VTSAcc}) -> - case index_lookup(Guid, State) of + fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) -> + case index_lookup(MsgId, State) of #msg_location { file = undefined } = StoreEntry -> ok = index_update(StoreEntry #msg_location { file = File, offset = Offset, @@ -1638,7 +1641,7 @@ build_index_worker(Gatherer, State = #msstate { dir = Dir }, %% file size. 
[] -> {undefined, case ValidMessages of [] -> 0; - _ -> {_Guid, TotalSize, Offset} = + _ -> {_MsgId, TotalSize, Offset} = lists:last(ValidMessages), Offset + TotalSize end}; @@ -1903,8 +1906,8 @@ load_and_vacuum_message_file(File, #gc_state { dir = Dir, scan_file_for_valid_messages(Dir, filenum_to_name(File)), %% foldl will reverse so will end up with msgs in ascending offset order lists:foldl( - fun ({Guid, TotalSize, Offset}, Acc = {List, Size}) -> - case Index:lookup(Guid, IndexState) of + fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) -> + case Index:lookup(MsgId, IndexState) of #msg_location { file = File, total_size = TotalSize, offset = Offset, ref_count = 0 } = Entry -> ok = Index:delete_object(Entry, IndexState), @@ -1929,13 +1932,13 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, end, case lists:foldl( - fun (#msg_location { guid = Guid, offset = Offset, + fun (#msg_location { msg_id = MsgId, offset = Offset, total_size = TotalSize }, {CurOffset, Block = {BlockStart, BlockEnd}}) -> %% CurOffset is in the DestinationFile. %% Offset, BlockStart and BlockEnd are in the SourceFile %% update MsgLocation to reflect change of file and offset - ok = Index:update_fields(Guid, + ok = Index:update_fields(MsgId, [{#msg_location.file, Destination}, {#msg_location.offset, CurOffset}], IndexState), @@ -2002,9 +2005,9 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), - fun({Guid, _Size, _Offset, BinMsg}, ok) -> + fun({MsgId, _Size, _Offset, BinMsg}, ok) -> {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), file_handle_cache:close(RefOld), diff --git a/src/rabbit_msg_store_ets_index.erl b/src/rabbit_msg_store_ets_index.erl index 077400d6..d6dc5568 100644 --- a/src/rabbit_msg_store_ets_index.erl +++ b/src/rabbit_msg_store_ets_index.erl @@ -31,7 +31,7 @@ new(Dir) -> file:delete(filename:join(Dir, ?FILENAME)), - Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.guid}]), + Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]), #state { table = Tid, dir = Dir }. recover(Dir) -> -- cgit v1.2.1 From 87d9ba2a4387a56f228f6e2ffc54a354b8e6a67d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:55:05 +0000 Subject: guid -> msg_id in qi --- src/rabbit_queue_index.erl | 87 +++++++++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 7b5aa120..a4984114 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -86,7 +86,7 @@ %% and seeding the message store on start up. %% %% Note that in general, the representation of a message's state as -%% the tuple: {('no_pub'|{Guid, MsgProps, IsPersistent}), +%% the tuple: {('no_pub'|{MsgId, MsgProps, IsPersistent}), %% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly %% necessary for most operations. However, for startup, and to ensure %% the safe and correct combination of journal entries with entries @@ -138,10 +138,10 @@ -define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)). -define(NO_EXPIRY, 0). --define(GUID_BYTES, 16). %% md5sum is 128 bit or 16 bytes --define(GUID_BITS, (?GUID_BYTES * 8)). +-define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes +-define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). 
%% 16 bytes for md5sum + 8 for expiry + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, ?GUID_BYTES + ?EXPIRY_BYTES + 2). +-define(PUBLISH_RECORD_LENGTH_BYTES, ?MSG_ID_BYTES + ?EXPIRY_BYTES + 2). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * @@ -150,7 +150,7 @@ %% ---- misc ---- --define(PUB, {_, _, _}). %% {Guid, MsgProps, IsPersistent} +-define(PUB, {_, _, _}). %% {MsgId, MsgProps, IsPersistent} -define(READ_MODE, [binary, raw, read]). -define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]). @@ -159,7 +159,7 @@ %%---------------------------------------------------------------------------- -record(qistate, { dir, segments, journal_handle, dirty_count, - max_journal_entries, on_sync, unsynced_guids }). + max_journal_entries, on_sync, unsynced_msg_ids }). -record(segment, { num, path, journal_entries, unacked }). @@ -187,7 +187,7 @@ dirty_count :: integer(), max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), - unsynced_guids :: [rabbit_types:msg_id()] + unsynced_msg_ids :: [rabbit_types:msg_id()] }). -type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). -type(walker(A) :: fun ((A) -> 'finished' | @@ -258,22 +258,22 @@ delete_and_terminate(State) -> ok = rabbit_misc:recursive_delete([Dir]), State1. -publish(Guid, SeqId, MsgProps, IsPersistent, - State = #qistate { unsynced_guids = UnsyncedGuids }) - when is_binary(Guid) -> - ?GUID_BYTES = size(Guid), +publish(MsgId, SeqId, MsgProps, IsPersistent, + State = #qistate { unsynced_msg_ids = UnsyncedMsgIds }) + when is_binary(MsgId) -> + ?MSG_ID_BYTES = size(MsgId), {JournalHdl, State1} = get_journal_handle( State #qistate { - unsynced_guids = [Guid | UnsyncedGuids] }), + unsynced_msg_ids = [MsgId | UnsyncedMsgIds] }), ok = file_handle_cache:append( JournalHdl, [<<(case IsPersistent of true -> ?PUB_PERSIST_JPREFIX; false -> ?PUB_TRANS_JPREFIX end):?JPREFIX_BITS, SeqId:?SEQ_BITS>>, - create_pub_record_body(Guid, MsgProps)]), + create_pub_record_body(MsgId, MsgProps)]), maybe_flush_journal( - add_to_journal(SeqId, {Guid, MsgProps, IsPersistent}, State1)). + add_to_journal(SeqId, {MsgId, MsgProps, IsPersistent}, State1)). deliver(SeqIds, State) -> deliver_or_ack(del, SeqIds, State). @@ -283,8 +283,8 @@ ack(SeqIds, State) -> %% This is only called when there are outstanding confirms and the %% queue is idle. -sync(State = #qistate { unsynced_guids = Guids }) -> - sync_if([] =/= Guids, State). +sync(State = #qistate { unsynced_msg_ids = MsgIds }) -> + sync_if([] =/= MsgIds, State). sync(SeqIds, State) -> %% The SeqIds here contains the SeqId of every publish and ack in @@ -387,7 +387,7 @@ blank_state(QueueName) -> dirty_count = 0, max_journal_entries = MaxJournal, on_sync = fun (_) -> ok end, - unsynced_guids = [] }. + unsynced_msg_ids = [] }. clean_file_name(Dir) -> filename:join(Dir, ?CLEAN_FILENAME). 
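%% Illustrative sketch (not part of the patch): publish/5 above both appends
%% the entry to the journal and remembers the msg id in unsynced_msg_ids, so
%% that a later sync can tell the on_sync callback which publishes are now
%% safely on disk. The accumulate/flush pattern in isolation, with an
%% invented state record:
-module(sync_notify_sketch).
-export([publish/2, flush/1]).

-record(state, {unsynced = [], on_sync}).

publish(MsgId, State = #state{unsynced = Unsynced}) ->
    State#state{unsynced = [MsgId | Unsynced]}.

flush(State = #state{unsynced = []}) ->
    State;
flush(State = #state{unsynced = Unsynced, on_sync = OnSyncFun}) ->
    %% in the real module this runs after file_handle_cache:sync/1 succeeds
    OnSyncFun(gb_sets:from_list(Unsynced)),
    State#state{unsynced = []}.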
@@ -469,8 +469,9 @@ recover_segment(ContainsCheckFun, CleanShutdown, {SegEntries1, UnackedCountDelta} = segment_plus_journal(SegEntries, JEntries), array:sparse_foldl( - fun (RelSeq, {{Guid, _MsgProps, _IsPersistent}, Del, no_ack}, Segment1) -> - recover_message(ContainsCheckFun(Guid), CleanShutdown, + fun (RelSeq, {{MsgId, _MsgProps, _IsPersistent}, Del, no_ack}, + Segment1) -> + recover_message(ContainsCheckFun(MsgId), CleanShutdown, Del, RelSeq, Segment1) end, Segment #segment { unacked = UnackedCount + UnackedCountDelta }, @@ -514,17 +515,17 @@ queue_index_walker({next, Gatherer}) when is_pid(Gatherer) -> ok = gatherer:stop(Gatherer), ok = rabbit_misc:unlink_and_capture_exit(Gatherer), finished; - {value, {Guid, Count}} -> - {Guid, Count, {next, Gatherer}} + {value, {MsgId, Count}} -> + {MsgId, Count, {next, Gatherer}} end. queue_index_walker_reader(QueueName, Gatherer) -> State = #qistate { segments = Segments, dir = Dir } = recover_journal(blank_state(QueueName)), [ok = segment_entries_foldr( - fun (_RelSeq, {{Guid, _MsgProps, true}, _IsDelivered, no_ack}, + fun (_RelSeq, {{MsgId, _MsgProps, true}, _IsDelivered, no_ack}, ok) -> - gatherer:in(Gatherer, {Guid, 1}); + gatherer:in(Gatherer, {MsgId, 1}); (_RelSeq, _Value, Acc) -> Acc end, ok, segment_find_or_new(Seg, Dir, Segments)) || @@ -536,24 +537,24 @@ queue_index_walker_reader(QueueName, Gatherer) -> %% expiry/binary manipulation %%---------------------------------------------------------------------------- -create_pub_record_body(Guid, #message_properties{expiry = Expiry}) -> - [Guid, expiry_to_binary(Expiry)]. +create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> + [MsgId, expiry_to_binary(Expiry)]. expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. read_pub_record_body(Hdl) -> - case file_handle_cache:read(Hdl, ?GUID_BYTES + ?EXPIRY_BYTES) of + case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of {ok, Bin} -> %% work around for binary data fragmentation. See %% rabbit_msg_file:read_next/2 - <> = Bin, - <> = <>, + <> = Bin, + <> = <>, Exp = case Expiry of ?NO_EXPIRY -> undefined; X -> X end, - {Guid, #message_properties{expiry = Exp}}; + {MsgId, #message_properties{expiry = Exp}}; Error -> Error end. @@ -680,8 +681,8 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> case read_pub_record_body(Hdl) of - {Guid, MsgProps} -> - Publish = {Guid, MsgProps, + {MsgId, MsgProps} -> + Publish = {MsgId, MsgProps, case Prefix of ?PUB_PERSIST_JPREFIX -> true; ?PUB_TRANS_JPREFIX -> false @@ -715,9 +716,9 @@ sync_if(true, State = #qistate { journal_handle = JournalHdl }) -> ok = file_handle_cache:sync(JournalHdl), notify_sync(State). -notify_sync(State = #qistate { unsynced_guids = UG, on_sync = OnSyncFun }) -> +notify_sync(State = #qistate { unsynced_msg_ids = UG, on_sync = OnSyncFun }) -> OnSyncFun(gb_sets:from_list(UG)), - State #qistate { unsynced_guids = [] }. + State #qistate { unsynced_msg_ids = [] }. 
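%% Illustrative sketch (not part of the patch): the publish record body used
%% above is the 16-byte msg id followed by an 8-byte expiry field, with 0
%% standing for "no expiry", as created by create_pub_record_body/2 and
%% parsed by read_pub_record_body/1. A self-contained encode/decode pair in
%% that spirit -- the macro values mirror the definitions earlier in this
%% file, but the module and function names are made up, and the real
%% create_pub_record_body/2 returns an iolist rather than one binary.
-module(pub_record_sketch).
-export([encode/2, decode/1]).

-define(MSG_ID_BYTES, 16).
-define(EXPIRY_BYTES, 8).
-define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)).
-define(NO_EXPIRY, 0).

encode(MsgId, undefined) when byte_size(MsgId) =:= ?MSG_ID_BYTES ->
    <<MsgId/binary, ?NO_EXPIRY:?EXPIRY_BITS>>;
encode(MsgId, Expiry) when byte_size(MsgId) =:= ?MSG_ID_BYTES ->
    <<MsgId/binary, Expiry:?EXPIRY_BITS>>.

decode(<<MsgId:?MSG_ID_BYTES/binary, Expiry:?EXPIRY_BITS>>) ->
    {MsgId, case Expiry of
                ?NO_EXPIRY -> undefined;
                _          -> Expiry
            end}.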
%%---------------------------------------------------------------------------- %% segment manipulation @@ -795,12 +796,12 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> ok = case Pub of no_pub -> ok; - {Guid, MsgProps, IsPersistent} -> + {MsgId, MsgProps, IsPersistent} -> file_handle_cache:append( Hdl, [<>, - create_pub_record_body(Guid, MsgProps)]) + create_pub_record_body(MsgId, MsgProps)]) end, ok = case {Del, Ack} of {no_del, no_ack} -> @@ -820,10 +821,10 @@ read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq}, {Messages, Segments}, Dir) -> Segment = segment_find_or_new(Seg, Dir, Segments), {segment_entries_foldr( - fun (RelSeq, {{Guid, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc) + fun (RelSeq, {{MsgId, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc) when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso (Seg < EndSeg orelse EndRelSeq >= RelSeq) -> - [ {Guid, reconstruct_seq_id(StartSeg, RelSeq), MsgProps, + [ {MsgId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps, IsPersistent, IsDelivered == del} | Acc ]; (_RelSeq, _Value, Acc) -> Acc @@ -853,8 +854,8 @@ load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of {ok, <>} -> - {Guid, MsgProps} = read_pub_record_body(Hdl), - Obj = {{Guid, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, + {MsgId, MsgProps} = read_pub_record_body(Hdl), + Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), load_segment_entries(KeepAcked, Hdl, SegEntries1, UnackedCount + 1); @@ -1001,17 +1002,17 @@ add_queue_ttl_journal(<>) -> {<>, Rest}; add_queue_ttl_journal(<>) -> - {[<>, Guid, + MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) -> + {[<>, MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_journal(_) -> stop. add_queue_ttl_segment(<>) -> {[<>, Guid, expiry_to_binary(undefined)], Rest}; + RelSeq:?REL_SEQ_BITS>>, MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_segment(<>) -> {<>, -- cgit v1.2.1 From ab3668ec2104d35a57efdf828db521ecbb5a0dac Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:30:06 +0000 Subject: guid -> msg_id --- src/rabbit_backing_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 6a21e10f..03c1fdd1 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -62,7 +62,7 @@ behaviour_info(callbacks) -> {fetch, 2}, %% Acktags supplied are for messages which can now be forgotten - %% about. Must return 1 guid per Ack, in the same order as Acks. + %% about. Must return 1 msg_id per Ack, in the same order as Acks. {ack, 2}, %% A publish, but in the context of a transaction. -- cgit v1.2.1 From 5769f3263378c0d6fb48bee884e6f24cc65304b1 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:30:25 +0000 Subject: guid -> msg_id in vq except for #basic_message --- src/rabbit_variable_queue.erl | 220 +++++++++++++++++++++--------------------- 1 file changed, 110 insertions(+), 110 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..1d32cec6 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -156,7 +156,7 @@ %% segments. 
%% %% Pending acks are recorded in memory either as the tuple {SeqId, -%% Guid, MsgProps} (tuple-form) or as the message itself (message- +%% MsgId, MsgProps} (tuple-form) or as the message itself (message- %% form). Acks for persistent messages are always stored in the tuple- %% form. Acks for transient messages are also stored in tuple-form if %% the message has been sent to disk as part of the memory reduction @@ -261,7 +261,7 @@ -record(msg_status, { seq_id, - guid, + msg_id, msg, is_persistent, is_delivered, @@ -400,10 +400,10 @@ stop_msg_store() -> init(QueueName, IsDurable, Recover) -> Self = self(), init(QueueName, IsDurable, Recover, - fun (Guids, ActionTaken) -> - msgs_written_to_disk(Self, Guids, ActionTaken) + fun (MsgIds, ActionTaken) -> + msgs_written_to_disk(Self, MsgIds, ActionTaken) end, - fun (Guids) -> msg_indices_written_to_disk(Self, Guids) end). + fun (MsgIds) -> msg_indices_written_to_disk(Self, MsgIds) end). init(QueueName, IsDurable, false, MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), @@ -432,8 +432,8 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> rabbit_queue_index:recover( QueueName, Terms1, rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE), - fun (Guid) -> - rabbit_msg_store:contains(Guid, PersistentClient) + fun (MsgId) -> + rabbit_msg_store:contains(MsgId, PersistentClient) end, MsgIdxOnDiskFun), init(true, IndexState, DeltaCount, Terms1, @@ -509,17 +509,17 @@ publish(Msg, MsgProps, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). -publish_delivered(false, #basic_message { guid = Guid }, +publish_delivered(false, #basic_message { guid = MsgId }, #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of - true -> blind_confirm(self(), gb_sets:singleton(Guid)); + true -> blind_confirm(self(), gb_sets:singleton(MsgId)); false -> ok end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, - guid = Guid }, + guid = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0, @@ -535,7 +535,7 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), State2 = record_pending_ack(m(MsgStatus1), State1), PCount1 = PCount + one_if(IsPersistent1), - UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), {SeqId, a(reduce_memory_use( State2 #vqstate { next_seq_id = SeqId + 1, out_counter = OutCount + 1, @@ -586,12 +586,12 @@ internal_queue_out(Fun, State = #vqstate { q4 = Q4 }) -> end. 
read_msg(MsgStatus = #msg_status { msg = undefined, - guid = Guid, + msg_id = MsgId, is_persistent = IsPersistent }, State = #vqstate { ram_msg_count = RamMsgCount, msg_store_clients = MSCState}) -> {{ok, Msg = #basic_message {}}, MSCState1} = - msg_store_read(MSCState, IsPersistent, Guid), + msg_store_read(MSCState, IsPersistent, MsgId), {MsgStatus #msg_status { msg = Msg }, State #vqstate { ram_msg_count = RamMsgCount + 1, msg_store_clients = MSCState1 }}; @@ -600,7 +600,7 @@ read_msg(MsgStatus, State) -> internal_fetch(AckRequired, MsgStatus = #msg_status { seq_id = SeqId, - guid = Guid, + msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -619,7 +619,7 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { %% 2. Remove from msg_store and queue index, if necessary Rem = fun () -> - ok = msg_store_remove(MSCState, IsPersistent, [Guid]) + ok = msg_store_remove(MSCState, IsPersistent, [MsgId]) end, Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, IndexState2 = @@ -678,7 +678,8 @@ tx_rollback(Txn, State = #vqstate { durable = IsDurable, #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), ok = case IsDurable of - true -> msg_store_remove(MSCState, true, persistent_guids(Pubs)); + true -> msg_store_remove(MSCState, true, + persistent_msg_ids(Pubs)); false -> ok end, {lists:append(AckTags), a(State)}. @@ -689,13 +690,13 @@ tx_commit(Txn, Fun, MsgPropsFun, #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), AckTags1 = lists:append(AckTags), - PersistentGuids = persistent_guids(Pubs), - HasPersistentPubs = PersistentGuids =/= [], + PersistentMsgIds = persistent_msg_ids(Pubs), + HasPersistentPubs = PersistentMsgIds =/= [], {AckTags1, a(case IsDurable andalso HasPersistentPubs of true -> ok = msg_store_sync( - MSCState, true, PersistentGuids, - msg_store_callback(PersistentGuids, Pubs, AckTags1, + MSCState, true, PersistentMsgIds, + msg_store_callback(PersistentMsgIds, Pubs, AckTags1, Fun, MsgPropsFun)), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, @@ -713,10 +714,10 @@ requeue(AckTags, MsgPropsFun, State) -> {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), true, false, State1), State2; - ({IsPersistent, Guid, MsgProps}, State1) -> + ({IsPersistent, MsgId, MsgProps}, State1) -> #vqstate { msg_store_clients = MSCState } = State1, {{ok, Msg = #basic_message{}}, MSCState1} = - msg_store_read(MSCState, IsPersistent, Guid), + msg_store_read(MSCState, IsPersistent, MsgId), State2 = State1 #vqstate { msg_store_clients = MSCState1 }, {_SeqId, State3} = publish(Msg, MsgPropsFun1(MsgProps), true, true, State2), @@ -905,12 +906,12 @@ cons_if(true, E, L) -> [E | L]; cons_if(false, _E, L) -> L. gb_sets_maybe_insert(false, _Val, Set) -> Set; -%% when requeueing, we re-add a guid to the unconfirmed set +%% when requeueing, we re-add a msg_id to the unconfirmed set gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). -msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = Guid }, +msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = MsgId }, MsgProps) -> - #msg_status { seq_id = SeqId, guid = Guid, msg = Msg, + #msg_status { seq_id = SeqId, msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = false, msg_on_disk = false, index_on_disk = false, msg_props = MsgProps }. 
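%% Illustrative sketch (not part of the patch): gb_sets_maybe_insert above is
%% how the variable queue remembers which published msg ids still owe a
%% confirm -- the id is only tracked when the publisher asked for
%% confirmation. A tiny standalone version; the function names are invented.
-module(unconfirmed_sketch).
-export([new/0, track/3]).

new() -> gb_sets:empty().

%% NeedsConfirming mirrors the flag carried in #message_properties{}.
track(false, _MsgId, Unconfirmed) -> Unconfirmed;
track(true,  MsgId,  Unconfirmed) -> gb_sets:add(MsgId, Unconfirmed).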
@@ -937,30 +938,30 @@ msg_store_client_init(MsgStore, Ref, MsgOnDiskFun) -> MsgStore, Ref, MsgOnDiskFun, msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE)). -msg_store_write(MSCState, IsPersistent, Guid, Msg) -> +msg_store_write(MSCState, IsPersistent, MsgId, Msg) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:write(Guid, Msg, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:write(MsgId, Msg, MSCState1) end). -msg_store_read(MSCState, IsPersistent, Guid) -> +msg_store_read(MSCState, IsPersistent, MsgId) -> with_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:read(Guid, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:read(MsgId, MSCState1) end). -msg_store_remove(MSCState, IsPersistent, Guids) -> +msg_store_remove(MSCState, IsPersistent, MsgIds) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:remove(Guids, MCSState1) end). + fun (MCSState1) -> rabbit_msg_store:remove(MsgIds, MCSState1) end). -msg_store_release(MSCState, IsPersistent, Guids) -> +msg_store_release(MSCState, IsPersistent, MsgIds) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:release(Guids, MCSState1) end). + fun (MCSState1) -> rabbit_msg_store:release(MsgIds, MCSState1) end). -msg_store_sync(MSCState, IsPersistent, Guids, Callback) -> +msg_store_sync(MSCState, IsPersistent, MsgIds, Fun) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:sync(Guids, Callback, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:sync(MsgIds, Fun, MSCState1) end). msg_store_close_fds(MSCState, IsPersistent) -> with_msg_store_state( @@ -994,21 +995,21 @@ store_tx(Txn, Tx) -> put({txn, Txn}, Tx). erase_tx(Txn) -> erase({txn, Txn}). -persistent_guids(Pubs) -> - [Guid || {#basic_message { guid = Guid, - is_persistent = true }, _MsgProps} <- Pubs]. +persistent_msg_ids(Pubs) -> + [MsgId || {#basic_message { guid = MsgId, + is_persistent = true }, _MsgProps} <- Pubs]. betas_from_index_entries(List, TransientThreshold, IndexState) -> {Filtered, Delivers, Acks} = lists:foldr( - fun ({Guid, SeqId, MsgProps, IsPersistent, IsDelivered}, + fun ({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}, {Filtered1, Delivers1, Acks1}) -> case SeqId < TransientThreshold andalso not IsPersistent of true -> {Filtered1, cons_if(not IsDelivered, SeqId, Delivers1), [SeqId | Acks1]}; false -> {[m(#msg_status { msg = undefined, - guid = Guid, + msg_id = MsgId, seq_id = SeqId, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -1114,7 +1115,7 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. -msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> +msg_store_callback(PersistentMsgIds, Pubs, AckTags, Fun, MsgPropsFun) -> Self = self(), F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( Self, fun (StateN) -> {[], tx_commit_post_msg_store( @@ -1124,14 +1125,14 @@ msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> end, fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( fun () -> remove_persistent_messages( - PersistentGuids) + PersistentMsgIds) end, F) end) end. 
-remove_persistent_messages(Guids) -> +remove_persistent_messages(MsgIds) -> PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, undefined), - ok = rabbit_msg_store:remove(Guids, PersistentClient), + ok = rabbit_msg_store:remove(MsgIds, PersistentClient), rabbit_msg_store:client_delete_and_terminate(PersistentClient). tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, @@ -1149,7 +1150,7 @@ tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, case dict:fetch(AckTag, PA) of #msg_status {} -> false; - {IsPersistent, _Guid, _MsgProps} -> + {IsPersistent, _MsgId, _MsgProps} -> IsPersistent end]; false -> [] @@ -1215,38 +1216,38 @@ purge_betas_and_deltas(LensByStore, end. remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) -> - {GuidsByStore, Delivers, Acks} = + {MsgIdsByStore, Delivers, Acks} = Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q), - ok = orddict:fold(fun (IsPersistent, Guids, ok) -> - msg_store_remove(MSCState, IsPersistent, Guids) - end, ok, GuidsByStore), - {sum_guids_by_store_to_len(LensByStore, GuidsByStore), + ok = orddict:fold(fun (IsPersistent, MsgIds, ok) -> + msg_store_remove(MSCState, IsPersistent, MsgIds) + end, ok, MsgIdsByStore), + {sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore), rabbit_queue_index:ack(Acks, rabbit_queue_index:deliver(Delivers, IndexState))}. remove_queue_entries1( - #msg_status { guid = Guid, seq_id = SeqId, + #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered, msg_on_disk = MsgOnDisk, index_on_disk = IndexOnDisk, is_persistent = IsPersistent }, - {GuidsByStore, Delivers, Acks}) -> + {MsgIdsByStore, Delivers, Acks}) -> {case MsgOnDisk of - true -> rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore); - false -> GuidsByStore + true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore); + false -> MsgIdsByStore end, cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), cons_if(IndexOnDisk, SeqId, Acks)}. -sum_guids_by_store_to_len(LensByStore, GuidsByStore) -> +sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) -> orddict:fold( - fun (IsPersistent, Guids, LensByStore1) -> - orddict:update_counter(IsPersistent, length(Guids), LensByStore1) - end, LensByStore, GuidsByStore). + fun (IsPersistent, MsgIds, LensByStore1) -> + orddict:update_counter(IsPersistent, length(MsgIds), LensByStore1) + end, LensByStore, MsgIdsByStore). 
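%% Illustrative sketch (not part of the patch): remove_queue_entries above
%% buckets msg ids by persistence so each message store is called once, and
%% sum_msg_ids_by_store_to_len then just counts per bucket. The two steps in
%% isolation over {IsPersistent, MsgId} pairs; names are invented and the
%% real code threads Delivers/Acks through the same fold.
-module(msg_ids_by_store_sketch).
-export([bucket/1, lengths/1]).

bucket(Entries) ->
    lists:foldl(fun ({IsPersistent, MsgId}, Acc) ->
                        orddict:append(IsPersistent, MsgId, Acc)
                end, orddict:new(), Entries).

lengths(MsgIdsByStore) ->
    orddict:fold(fun (IsPersistent, MsgIds, Acc) ->
                         orddict:store(IsPersistent, length(MsgIds), Acc)
                 end, orddict:new(), MsgIdsByStore).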
%%---------------------------------------------------------------------------- %% Internal gubbins for publishing %%---------------------------------------------------------------------------- -publish(Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, +publish(Msg = #basic_message { is_persistent = IsPersistent, guid = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, IsDelivered, MsgOnDisk, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, @@ -1266,7 +1267,7 @@ publish(Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, true -> State1 #vqstate { q4 = queue:in(m(MsgStatus1), Q4) } end, PCount1 = PCount + one_if(IsPersistent1), - UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), {SeqId, State2 #vqstate { next_seq_id = SeqId + 1, len = Len + 1, in_counter = InCount + 1, @@ -1278,14 +1279,14 @@ maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status { msg_on_disk = true }, _MSCState) -> MsgStatus; maybe_write_msg_to_disk(Force, MsgStatus = #msg_status { - msg = Msg, guid = Guid, + msg = Msg, msg_id = MsgId, is_persistent = IsPersistent }, MSCState) when Force orelse IsPersistent -> Msg1 = Msg #basic_message { %% don't persist any recoverable decoded properties content = rabbit_binary_parser:clear_decoded_content( Msg #basic_message.content)}, - ok = msg_store_write(MSCState, IsPersistent, Guid, Msg1), + ok = msg_store_write(MSCState, IsPersistent, MsgId, Msg1), MsgStatus #msg_status { msg_on_disk = true }; maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) -> MsgStatus. @@ -1295,7 +1296,7 @@ maybe_write_index_to_disk(_Force, MsgStatus = #msg_status { true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION {MsgStatus, IndexState}; maybe_write_index_to_disk(Force, MsgStatus = #msg_status { - guid = Guid, + msg_id = MsgId, seq_id = SeqId, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -1303,7 +1304,7 @@ maybe_write_index_to_disk(Force, MsgStatus = #msg_status { when Force orelse IsPersistent -> true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION IndexState1 = rabbit_queue_index:publish( - Guid, SeqId, MsgProps, IsPersistent, IndexState), + MsgId, SeqId, MsgProps, IsPersistent, IndexState), {MsgStatus #msg_status { index_on_disk = true }, maybe_write_delivered(IsDelivered, SeqId, IndexState1)}; maybe_write_index_to_disk(_Force, MsgStatus, IndexState) -> @@ -1322,7 +1323,7 @@ maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, %%---------------------------------------------------------------------------- record_pending_ack(#msg_status { seq_id = SeqId, - guid = Guid, + msg_id = MsgId, is_persistent = IsPersistent, msg_on_disk = MsgOnDisk, msg_props = MsgProps } = MsgStatus, @@ -1331,8 +1332,8 @@ record_pending_ack(#msg_status { seq_id = SeqId, ack_in_counter = AckInCount}) -> {AckEntry, RAI1} = case MsgOnDisk of - true -> {{IsPersistent, Guid, MsgProps}, RAI}; - false -> {MsgStatus, gb_trees:insert(SeqId, Guid, RAI)} + true -> {{IsPersistent, MsgId, MsgProps}, RAI}; + false -> {MsgStatus, gb_trees:insert(SeqId, MsgId, RAI)} end, PA1 = dict:store(SeqId, AckEntry, PA), State #vqstate { pending_ack = PA1, @@ -1343,28 +1344,28 @@ remove_pending_ack(KeepPersistent, State = #vqstate { pending_ack = PA, index_state = IndexState, msg_store_clients = MSCState }) -> - {PersistentSeqIds, GuidsByStore} = + {PersistentSeqIds, MsgIdsByStore} = dict:fold(fun accumulate_ack/3, accumulate_ack_init(), PA), State1 = State #vqstate { pending_ack = dict:new(), ram_ack_index = 
gb_trees:empty() }, case KeepPersistent of - true -> case orddict:find(false, GuidsByStore) of - error -> State1; - {ok, Guids} -> ok = msg_store_remove(MSCState, false, - Guids), + true -> case orddict:find(false, MsgIdsByStore) of + error -> State1; + {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, + MsgIds), State1 end; false -> IndexState1 = rabbit_queue_index:ack(PersistentSeqIds, IndexState), - [ok = msg_store_remove(MSCState, IsPersistent, Guids) - || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], + [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) + || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], State1 #vqstate { index_state = IndexState1 } end. ack(_MsgStoreFun, _Fun, [], State) -> State; ack(MsgStoreFun, Fun, AckTags, State) -> - {{PersistentSeqIds, GuidsByStore}, + {{PersistentSeqIds, MsgIdsByStore}, State1 = #vqstate { index_state = IndexState, msg_store_clients = MSCState, persistent_count = PCount, @@ -1380,10 +1381,10 @@ ack(MsgStoreFun, Fun, AckTags, State) -> gb_trees:delete_any(SeqId, RAI)})} end, {accumulate_ack_init(), State}, AckTags), IndexState1 = rabbit_queue_index:ack(PersistentSeqIds, IndexState), - [ok = MsgStoreFun(MSCState, IsPersistent, Guids) - || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], - PCount1 = PCount - find_persistent_count(sum_guids_by_store_to_len( - orddict:new(), GuidsByStore)), + [ok = MsgStoreFun(MSCState, IsPersistent, MsgIds) + || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], + PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len( + orddict:new(), MsgIdsByStore)), State1 #vqstate { index_state = IndexState1, persistent_count = PCount1, ack_out_counter = AckOutCount + length(AckTags) }. @@ -1393,12 +1394,12 @@ accumulate_ack_init() -> {[], orddict:new()}. accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS msg_on_disk = false, index_on_disk = false }, - {PersistentSeqIdsAcc, GuidsByStore}) -> - {PersistentSeqIdsAcc, GuidsByStore}; -accumulate_ack(SeqId, {IsPersistent, Guid, _MsgProps}, - {PersistentSeqIdsAcc, GuidsByStore}) -> + {PersistentSeqIdsAcc, MsgIdsByStore}) -> + {PersistentSeqIdsAcc, MsgIdsByStore}; +accumulate_ack(SeqId, {IsPersistent, MsgId, _MsgProps}, + {PersistentSeqIdsAcc, MsgIdsByStore}) -> {cons_if(IsPersistent, SeqId, PersistentSeqIdsAcc), - rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore)}. + rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore)}. find_persistent_count(LensByStore) -> case orddict:find(true, LensByStore) of @@ -1417,12 +1418,12 @@ confirm_commit_index(State = #vqstate { index_state = IndexState }) -> false -> State end. -remove_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, +remove_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - State #vqstate { msgs_on_disk = gb_sets:difference(MOD, GuidSet), - msg_indices_on_disk = gb_sets:difference(MIOD, GuidSet), - unconfirmed = gb_sets:difference(UC, GuidSet) }. + State #vqstate { msgs_on_disk = gb_sets:difference(MOD, MsgIdSet), + msg_indices_on_disk = gb_sets:difference(MIOD, MsgIdSet), + unconfirmed = gb_sets:difference(UC, MsgIdSet) }. needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, unconfirmed = UC }) -> @@ -1439,37 +1440,37 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, %% subtraction. not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). -msgs_confirmed(GuidSet, State) -> - {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. 
+msgs_confirmed(MsgIdSet, State) -> + {gb_sets:to_list(MsgIdSet), remove_confirms(MsgIdSet, State)}. -blind_confirm(QPid, GuidSet) -> +blind_confirm(QPid, MsgIdSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State) -> msgs_confirmed(GuidSet, State) end). + QPid, fun (State) -> msgs_confirmed(MsgIdSet, State) end). -msgs_written_to_disk(QPid, GuidSet, removed) -> - blind_confirm(QPid, GuidSet); -msgs_written_to_disk(QPid, GuidSet, written) -> +msgs_written_to_disk(QPid, MsgIdSet, removed) -> + blind_confirm(QPid, MsgIdSet); +msgs_written_to_disk(QPid, MsgIdSet, written) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( QPid, fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + Written = gb_sets:intersection(UC, MsgIdSet), + msgs_confirmed(gb_sets:intersection(MsgIdSet, MIOD), State #vqstate { msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) + gb_sets:union(MOD, Written) }) end). -msg_indices_written_to_disk(QPid, GuidSet) -> +msg_indices_written_to_disk(QPid, MsgIdSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( QPid, fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + Written = gb_sets:intersection(UC, MsgIdSet), + msgs_confirmed(gb_sets:intersection(MsgIdSet, MOD), State #vqstate { msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) + gb_sets:union(MIOD, Written) }) end). %%---------------------------------------------------------------------------- @@ -1547,17 +1548,16 @@ limit_ram_acks(Quota, State = #vqstate { pending_ack = PA, true -> {Quota, State}; false -> - {SeqId, Guid, RAI1} = gb_trees:take_largest(RAI), + {SeqId, MsgId, RAI1} = gb_trees:take_largest(RAI), MsgStatus = #msg_status { - guid = Guid, %% ASSERTION + msg_id = MsgId, %% ASSERTION is_persistent = false, %% ASSERTION msg_props = MsgProps } = dict:fetch(SeqId, PA), {_, State1} = maybe_write_to_disk(true, false, MsgStatus, State), + PA1 = dict:store(SeqId, {false, MsgId, MsgProps}, PA), limit_ram_acks(Quota - 1, - State1 #vqstate { - pending_ack = - dict:store(SeqId, {false, Guid, MsgProps}, PA), - ram_ack_index = RAI1 }) + State1 #vqstate { pending_ack = PA1, + ram_ack_index = RAI1 }) end. @@ -1818,9 +1818,9 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> multiple_routing_keys() -> transform_storage( fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> + MsgId, Persistent}) -> {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; + MsgId, Persistent}}; (_) -> {error, corrupt_message} end), ok. -- cgit v1.2.1 From 8569560c351598e90c38b2a794b1d46b96347b76 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:36:49 +0000 Subject: #basic_message.guid -> id --- include/rabbit.hrl | 2 +- src/rabbit_amqqueue_process.erl | 2 +- src/rabbit_basic.erl | 12 ++++++------ src/rabbit_types.erl | 2 +- src/rabbit_variable_queue.erl | 10 +++++----- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 4d75b546..9f483c30 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -62,7 +62,7 @@ -record(listener, {node, protocol, host, ip_address, port}). 
--record(basic_message, {exchange_name, routing_keys = [], content, guid, +-record(basic_message, {exchange_name, routing_keys = [], content, id, is_persistent}). -record(ssl_socket, {tcp, ssl}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..57426e13 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -433,7 +433,7 @@ record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, - guid = Guid}}, + id = Guid}}, State = #q{guid_to_channel = GTC, q = #amqqueue{durable = true}}) -> diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 57aad808..43230f30 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -116,12 +116,12 @@ message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> try {ok, #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | - header_routes(Props#'P_basic'.headers)]}} + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + id = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} catch {error, _Reason} = Error -> Error end. diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 899291f2..90dfd38d 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -67,7 +67,7 @@ #basic_message{exchange_name :: rabbit_exchange:name(), routing_keys :: [rabbit_router:routing_key()], content :: content(), - guid :: msg_id(), + id :: msg_id(), is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1d32cec6..0c4c06e8 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -509,7 +509,7 @@ publish(Msg, MsgProps, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). -publish_delivered(false, #basic_message { guid = MsgId }, +publish_delivered(false, #basic_message { id = MsgId }, #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> @@ -519,7 +519,7 @@ publish_delivered(false, #basic_message { guid = MsgId }, end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, - guid = MsgId }, + id = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0, @@ -909,7 +909,7 @@ gb_sets_maybe_insert(false, _Val, Set) -> Set; %% when requeueing, we re-add a msg_id to the unconfirmed set gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). -msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = MsgId }, +msg_status(IsPersistent, SeqId, Msg = #basic_message { id = MsgId }, MsgProps) -> #msg_status { seq_id = SeqId, msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = false, @@ -996,7 +996,7 @@ store_tx(Txn, Tx) -> put({txn, Txn}, Tx). erase_tx(Txn) -> erase({txn, Txn}). persistent_msg_ids(Pubs) -> - [MsgId || {#basic_message { guid = MsgId, + [MsgId || {#basic_message { id = MsgId, is_persistent = true }, _MsgProps} <- Pubs]. 
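%% Illustrative sketch (not part of the patch): msgs_written_to_disk and
%% msg_indices_written_to_disk above only confirm a msg id once it shows up
%% in *both* the msgs_on_disk and msg_indices_on_disk sets, after first
%% intersecting the notification with the unconfirmed set so stale ids are
%% ignored. The set algebra on its own, with invented names:
-module(confirm_sets_sketch).
-export([written_to_disk/3]).

%% Returns {MsgIdsReadyToConfirm, NewMsgsOnDisk}; the symmetric case for
%% index writes swaps the two on-disk sets.
written_to_disk(MsgIdSet, Unconfirmed, {MsgsOnDisk, MsgIndicesOnDisk}) ->
    Written = gb_sets:intersection(Unconfirmed, MsgIdSet),
    Ready   = gb_sets:intersection(MsgIdSet, MsgIndicesOnDisk),
    {gb_sets:to_list(Ready), gb_sets:union(MsgsOnDisk, Written)}.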
betas_from_index_entries(List, TransientThreshold, IndexState) -> @@ -1247,7 +1247,7 @@ sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) -> %% Internal gubbins for publishing %%---------------------------------------------------------------------------- -publish(Msg = #basic_message { is_persistent = IsPersistent, guid = MsgId }, +publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, IsDelivered, MsgOnDisk, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, -- cgit v1.2.1 From 1bd39c0325baec4014cb05654f2be02f8843fdc8 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:46:21 +0000 Subject: guid -> msg_id in amqqueue_process --- src/rabbit_amqqueue_process.erl | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 57426e13..650b6a68 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -46,7 +46,7 @@ rate_timer_ref, expiry_timer_ref, stats_timer, - guid_to_channel, + msg_id_to_channel, ttl, ttl_timer_ref }). @@ -112,7 +112,7 @@ init(Q) -> expiry_timer_ref = undefined, ttl = undefined, stats_timer = rabbit_event:init_stats_timer(), - guid_to_channel = dict:new()}, hibernate, + msg_id_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. terminate(shutdown, State = #q{backing_queue = BQ}) -> @@ -404,22 +404,22 @@ deliver_from_queue_deliver(AckRequired, false, State) -> fetch(AckRequired, State), {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. -confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> - {CMs, GTC1} = +confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> + {CMs, MTC1} = lists:foldl( - fun(Guid, {CMs, GTC0}) -> - case dict:find(Guid, GTC0) of + fun(MsgId, {CMs, MTC0}) -> + case dict:find(MsgId, MTC0) of {ok, {ChPid, MsgSeqNo}} -> {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, GTC0)}; + dict:erase(MsgId, MTC0)}; _ -> - {CMs, GTC0} + {CMs, MTC0} end - end, {gb_trees:empty(), GTC}, Guids), + end, {gb_trees:empty(), MTC}, MsgIds), gb_trees:map(fun(ChPid, MsgSeqNos) -> rabbit_channel:confirm(ChPid, MsgSeqNos) end, CMs), - State#q{guid_to_channel = GTC1}. + State#q{msg_id_to_channel = MTC1}. gb_trees_cons(Key, Value, Tree) -> case gb_trees:lookup(Key, Tree) of @@ -433,12 +433,12 @@ record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, - id = Guid}}, + id = MsgId}}, State = - #q{guid_to_channel = GTC, - q = #amqqueue{durable = true}}) -> + #q{msg_id_to_channel = MTC, + q = #amqqueue{durable = true}}) -> {confirm, - State#q{guid_to_channel = dict:store(Guid, {ChPid, MsgSeqNo}, GTC)}}; + State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> {no_confirm, State}. @@ -618,9 +618,9 @@ backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - {Guids, BQS1} = Fun(BQS), + {MsgIds, BQS1} = Fun(BQS), run_message_queue( - confirm_messages(Guids, State#q{backing_queue_state = BQS1})). + confirm_messages(MsgIds, State#q{backing_queue_state = BQS1})). 
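%% --- Illustrative sketch, not part of the patch --------------------------
%% Standalone model of confirm_messages/2 above: given the msg_id ->
%% {ChPid, MsgSeqNo} mapping, group the sequence numbers of the confirmed
%% message ids per channel and drop them from the mapping. Only dict and
%% gb_trees from the stdlib are used; the confirm_group_sketch module name,
%% demo/0 and the c1/c2 channel stand-ins are invented for the example.
-module(confirm_group_sketch).
-export([demo/0]).

group_confirms(MsgIds, MTC) ->
    lists:foldl(
      fun (MsgId, {CMs, MTC0}) ->
              case dict:find(MsgId, MTC0) of
                  {ok, {ChPid, MsgSeqNo}} ->
                      {gb_trees_cons(ChPid, MsgSeqNo, CMs),
                       dict:erase(MsgId, MTC0)};
                  error ->
                      {CMs, MTC0}            %% not awaiting a confirm
              end
      end, {gb_trees:empty(), MTC}, MsgIds).

gb_trees_cons(Key, Value, Tree) ->
    case gb_trees:lookup(Key, Tree) of
        {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
        none            -> gb_trees:insert(Key, [Value], Tree)
    end.

demo() ->
    Ch1 = c1, Ch2 = c2,                      %% stand-ins for channel pids
    MTC = dict:from_list([{m1, {Ch1, 1}}, {m2, {Ch2, 7}}, {m3, {Ch1, 2}}]),
    {CMs, MTC1} = group_confirms([m1, m3, unknown], MTC),
    [{c1, [2, 1]}] = gb_trees:to_list(CMs),  %% both confirms grouped on Ch1
    {ok, {c2, 7}} = dict:find(m2, MTC1),     %% m2 still awaiting its confirm
    error = dict:find(m1, MTC1),
    ok.
%% confirm_messages/2 then walks the resulting tree with gb_trees:map/2,
%% issuing one rabbit_channel:confirm(ChPid, MsgSeqNos) call per channel.
%% -------------------------------------------------------------------------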
commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, State = #q{backing_queue = BQ, @@ -767,8 +767,8 @@ prioritise_cast(Msg, _State) -> maybe_expire -> 8; drop_expired -> 8; emit_stats -> 7; - {ack, _Txn, _MsgIds, _ChPid} -> 7; - {reject, _MsgIds, _Requeue, _ChPid} -> 7; + {ack, _Txn, _AckTags, _ChPid} -> 7; + {reject, _AckTags, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; {unblock, _ChPid} -> 7; {maybe_run_queue_via_backing_queue, _Fun} -> 6; -- cgit v1.2.1 From 1076e2220865be678888d3ec1fd2799bdb55da60 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:58:26 +0000 Subject: guid -> msg_id in tests --- src/rabbit_tests.erl | 200 +++++++++++++++++++++++++-------------------------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..2def7573 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1602,50 +1602,50 @@ restart_msg_store_empty() -> ok = rabbit_variable_queue:start_msg_store( undefined, {fun (ok) -> finished end, ok}). -guid_bin(X) -> +msg_id_bin(X) -> erlang:md5(term_to_binary(X)). msg_store_client_init(MsgStore, Ref) -> rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined). -msg_store_contains(Atom, Guids, MSCState) -> +msg_store_contains(Atom, MsgIds, MSCState) -> Atom = lists:foldl( - fun (Guid, Atom1) when Atom1 =:= Atom -> - rabbit_msg_store:contains(Guid, MSCState) end, - Atom, Guids). + fun (MsgId, Atom1) when Atom1 =:= Atom -> + rabbit_msg_store:contains(MsgId, MSCState) end, + Atom, MsgIds). -msg_store_sync(Guids, MSCState) -> +msg_store_sync(MsgIds, MSCState) -> Ref = make_ref(), Self = self(), - ok = rabbit_msg_store:sync(Guids, fun () -> Self ! {sync, Ref} end, + ok = rabbit_msg_store:sync(MsgIds, fun () -> Self ! {sync, Ref} end, MSCState), receive {sync, Ref} -> ok after 10000 -> - io:format("Sync from msg_store missing for guids ~p~n", [Guids]), + io:format("Sync from msg_store missing for msg_ids ~p~n", [MsgIds]), throw(timeout) end. -msg_store_read(Guids, MSCState) -> - lists:foldl(fun (Guid, MSCStateM) -> - {{ok, Guid}, MSCStateN} = rabbit_msg_store:read( - Guid, MSCStateM), +msg_store_read(MsgIds, MSCState) -> + lists:foldl(fun (MsgId, MSCStateM) -> + {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read( + MsgId, MSCStateM), MSCStateN - end, MSCState, Guids). + end, MSCState, MsgIds). -msg_store_write(Guids, MSCState) -> - ok = lists:foldl( - fun (Guid, ok) -> rabbit_msg_store:write(Guid, Guid, MSCState) end, - ok, Guids). +msg_store_write(MsgIds, MSCState) -> + ok = lists:foldl(fun (MsgId, ok) -> + rabbit_msg_store:write(MsgId, MsgId, MSCState) + end, ok, MsgIds). -msg_store_remove(Guids, MSCState) -> - rabbit_msg_store:remove(Guids, MSCState). +msg_store_remove(MsgIds, MSCState) -> + rabbit_msg_store:remove(MsgIds, MSCState). -msg_store_remove(MsgStore, Ref, Guids) -> +msg_store_remove(MsgStore, Ref, MsgIds) -> with_msg_store_client(MsgStore, Ref, fun (MSCStateM) -> - ok = msg_store_remove(Guids, MSCStateM), + ok = msg_store_remove(MsgIds, MSCStateM), MSCStateM end). @@ -1655,140 +1655,140 @@ with_msg_store_client(MsgStore, Ref, Fun) -> foreach_with_msg_store_client(MsgStore, Ref, Fun, L) -> rabbit_msg_store:client_terminate( - lists:foldl(fun (Guid, MSCState) -> Fun(Guid, MSCState) end, + lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end, msg_store_client_init(MsgStore, Ref), L)). 
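%% --- Illustrative sketch, not part of the patch --------------------------
%% A condensed version of the round trip that the renamed helpers above
%% (msg_store_write/read/contains/remove) exercise: write a few messages to
%% the persistent message store, check and read them back, then remove
%% them. This assumes it runs inside a broker node with the message store
%% started and the usual rabbit_tests includes (for ?PERSISTENT_MSG_STORE);
%% it is a sketch of the client API usage, not a new test.
msg_store_round_trip() ->
    Ref = rabbit_guid:guid(),
    MSCState = rabbit_msg_store:client_init(?PERSISTENT_MSG_STORE, Ref,
                                            undefined, undefined),
    MsgIds = [erlang:md5(term_to_binary(N)) || N <- [1, 2, 3]],
    %% store each message under its id (the tests use the id as the body too)
    [ok = rabbit_msg_store:write(MsgId, MsgId, MSCState) || MsgId <- MsgIds],
    true = lists:all(fun (MsgId) ->
                             rabbit_msg_store:contains(MsgId, MSCState)
                     end, MsgIds),
    %% read them back; read/2 threads the client state through
    MSCState1 = lists:foldl(fun (MsgId, MSCStateN) ->
                                    {{ok, MsgId}, MSCStateN1} =
                                        rabbit_msg_store:read(MsgId, MSCStateN),
                                    MSCStateN1
                            end, MSCState, MsgIds),
    ok = rabbit_msg_store:remove(MsgIds, MSCState1),
    ok = rabbit_msg_store:client_terminate(MSCState1).
%% -------------------------------------------------------------------------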
test_msg_store() -> restart_msg_store_empty(), Self = self(), - Guids = [guid_bin(M) || M <- lists:seq(1,100)], - {Guids1stHalf, Guids2ndHalf} = lists:split(50, Guids), + MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)], + {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(50, MsgIds), Ref = rabbit_guid:guid(), MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we don't contain any of the msgs we're about to publish - false = msg_store_contains(false, Guids, MSCState), + false = msg_store_contains(false, MsgIds, MSCState), %% publish the first half - ok = msg_store_write(Guids1stHalf, MSCState), + ok = msg_store_write(MsgIds1stHalf, MSCState), %% sync on the first half - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% publish the second half - ok = msg_store_write(Guids2ndHalf, MSCState), + ok = msg_store_write(MsgIds2ndHalf, MSCState), %% sync on the first half again - the msg_store will be dirty, but %% we won't need the fsync - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% check they're all in there - true = msg_store_contains(true, Guids, MSCState), + true = msg_store_contains(true, MsgIds, MSCState), %% publish the latter half twice so we hit the caching and ref count code - ok = msg_store_write(Guids2ndHalf, MSCState), + ok = msg_store_write(MsgIds2ndHalf, MSCState), %% check they're still all in there - true = msg_store_contains(true, Guids, MSCState), + true = msg_store_contains(true, MsgIds, MSCState), %% sync on the 2nd half, but do lots of individual syncs to try %% and cause coalescing to happen ok = lists:foldl( - fun (Guid, ok) -> rabbit_msg_store:sync( - [Guid], fun () -> Self ! {sync, Guid} end, - MSCState) - end, ok, Guids2ndHalf), + fun (MsgId, ok) -> rabbit_msg_store:sync( + [MsgId], fun () -> Self ! 
{sync, MsgId} end, + MSCState) + end, ok, MsgIds2ndHalf), lists:foldl( - fun(Guid, ok) -> + fun(MsgId, ok) -> receive - {sync, Guid} -> ok + {sync, MsgId} -> ok after 10000 -> - io:format("Sync from msg_store missing (guid: ~p)~n", - [Guid]), + io:format("Sync from msg_store missing (msg_id: ~p)~n", + [MsgId]), throw(timeout) end - end, ok, Guids2ndHalf), + end, ok, MsgIds2ndHalf), %% it's very likely we're not dirty here, so the 1st half sync %% should hit a different code path - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% read them all - MSCState1 = msg_store_read(Guids, MSCState), + MSCState1 = msg_store_read(MsgIds, MSCState), %% read them all again - this will hit the cache, not disk - MSCState2 = msg_store_read(Guids, MSCState1), + MSCState2 = msg_store_read(MsgIds, MSCState1), %% remove them all - ok = rabbit_msg_store:remove(Guids, MSCState2), + ok = rabbit_msg_store:remove(MsgIds, MSCState2), %% check first half doesn't exist - false = msg_store_contains(false, Guids1stHalf, MSCState2), + false = msg_store_contains(false, MsgIds1stHalf, MSCState2), %% check second half does exist - true = msg_store_contains(true, Guids2ndHalf, MSCState2), + true = msg_store_contains(true, MsgIds2ndHalf, MSCState2), %% read the second half again - MSCState3 = msg_store_read(Guids2ndHalf, MSCState2), + MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2), %% release the second half, just for fun (aka code coverage) - ok = rabbit_msg_store:release(Guids2ndHalf, MSCState3), + ok = rabbit_msg_store:release(MsgIds2ndHalf, MSCState3), %% read the second half again, just for fun (aka code coverage) - MSCState4 = msg_store_read(Guids2ndHalf, MSCState3), + MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3), ok = rabbit_msg_store:client_terminate(MSCState4), %% stop and restart, preserving every other msg in 2nd half ok = rabbit_variable_queue:stop_msg_store(), ok = rabbit_variable_queue:start_msg_store( [], {fun ([]) -> finished; - ([Guid|GuidsTail]) - when length(GuidsTail) rem 2 == 0 -> - {Guid, 1, GuidsTail}; - ([Guid|GuidsTail]) -> - {Guid, 0, GuidsTail} - end, Guids2ndHalf}), + ([MsgId|MsgIdsTail]) + when length(MsgIdsTail) rem 2 == 0 -> + {MsgId, 1, MsgIdsTail}; + ([MsgId|MsgIdsTail]) -> + {MsgId, 0, MsgIdsTail} + end, MsgIds2ndHalf}), MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we have the right msgs left lists:foldl( - fun (Guid, Bool) -> - not(Bool = rabbit_msg_store:contains(Guid, MSCState5)) - end, false, Guids2ndHalf), + fun (MsgId, Bool) -> + not(Bool = rabbit_msg_store:contains(MsgId, MSCState5)) + end, false, MsgIds2ndHalf), ok = rabbit_msg_store:client_terminate(MSCState5), %% restart empty restart_msg_store_empty(), MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we don't contain any of the msgs - false = msg_store_contains(false, Guids, MSCState6), + false = msg_store_contains(false, MsgIds, MSCState6), %% publish the first half again - ok = msg_store_write(Guids1stHalf, MSCState6), + ok = msg_store_write(MsgIds1stHalf, MSCState6), %% this should force some sort of sync internally otherwise misread ok = rabbit_msg_store:client_terminate( - msg_store_read(Guids1stHalf, MSCState6)), + msg_store_read(MsgIds1stHalf, MSCState6)), MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), - ok = rabbit_msg_store:remove(Guids1stHalf, MSCState7), + ok = rabbit_msg_store:remove(MsgIds1stHalf, MSCState7), ok = rabbit_msg_store:client_terminate(MSCState7), %% restart empty - 
restart_msg_store_empty(), %% now safe to reuse guids + restart_msg_store_empty(), %% now safe to reuse msg_ids %% push a lot of msgs in... at least 100 files worth {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit), PayloadSizeBits = 65536, BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)), - GuidsBig = [guid_bin(X) || X <- lists:seq(1, BigCount)], + MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)], Payload = << 0:PayloadSizeBits >>, ok = with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, fun (MSCStateM) -> - [ok = rabbit_msg_store:write(Guid, Payload, MSCStateM) || - Guid <- GuidsBig], + [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) || + MsgId <- MsgIdsBig], MSCStateM end), %% now read them to ensure we hit the fast client-side reading ok = foreach_with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, - fun (Guid, MSCStateM) -> + fun (MsgId, MSCStateM) -> {{ok, Payload}, MSCStateN} = rabbit_msg_store:read( - Guid, MSCStateM), + MsgId, MSCStateM), MSCStateN - end, GuidsBig), + end, MsgIdsBig), %% .., then 3s by 1... ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]), %% .., then remove 3s by 2, from the young end first. This hits %% GC (under 50% good data left, but no empty files. Must GC). ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), %% .., then remove 3s by 3, from the young end first. This hits %% GC... ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), %% ensure empty ok = with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, fun (MSCStateM) -> - false = msg_store_contains(false, GuidsBig, MSCStateM), + false = msg_store_contains(false, MsgIdsBig, MSCStateM), MSCStateM end), %% restart empty @@ -1808,8 +1808,8 @@ init_test_queue() -> PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef), Res = rabbit_queue_index:recover( TestQueue, Terms, false, - fun (Guid) -> - rabbit_msg_store:contains(Guid, PersistentClient) + fun (MsgId) -> + rabbit_msg_store:contains(MsgId, PersistentClient) end, fun nop/1), ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient), @@ -1840,25 +1840,25 @@ queue_index_publish(SeqIds, Persistent, Qi) -> false -> ?TRANSIENT_MSG_STORE end, MSCState = msg_store_client_init(MsgStore, Ref), - {A, B = [{_SeqId, LastGuidWritten} | _]} = + {A, B = [{_SeqId, LastMsgIdWritten} | _]} = lists:foldl( - fun (SeqId, {QiN, SeqIdsGuidsAcc}) -> - Guid = rabbit_guid:guid(), + fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) -> + MsgId = rabbit_guid:guid(), QiM = rabbit_queue_index:publish( - Guid, SeqId, #message_properties{}, Persistent, QiN), - ok = rabbit_msg_store:write(Guid, Guid, MSCState), - {QiM, [{SeqId, Guid} | SeqIdsGuidsAcc]} + MsgId, SeqId, #message_properties{}, Persistent, QiN), + ok = rabbit_msg_store:write(MsgId, MsgId, MSCState), + {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]} end, {Qi, []}, SeqIds), %% do this just to force all of the publishes through to the msg_store: - true = rabbit_msg_store:contains(LastGuidWritten, MSCState), + true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState), ok = rabbit_msg_store:client_delete_and_terminate(MSCState), {A, B}. 
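%% --- Illustrative sketch, not part of the patch --------------------------
%% The publish/deliver/ack cycle that queue_index_publish/3 and
%% test_queue_index/0 drive, reduced to a single persistent message. Qi0 is
%% assumed to be a fresh queue index state such as with_empty_test_queue/1
%% supplies; this is an API sketch in the style of the surrounding tests,
%% not a new test case.
queue_index_cycle(Qi0) ->
    SeqId = 0,
    MsgId = rabbit_guid:guid(),
    Props = #message_properties{},
    Qi1 = rabbit_queue_index:publish(MsgId, SeqId, Props, true, Qi0),
    %% read/3 takes an inclusive lower and exclusive upper sequence id;
    %% the entry comes back as {MsgId, SeqId, Props, Persistent, Delivered}
    {[{MsgId, SeqId, _ReadProps, true, false}], Qi2} =
        rabbit_queue_index:read(SeqId, SeqId + 1, Qi1),
    Qi3 = rabbit_queue_index:deliver([SeqId], Qi2),
    Qi4 = rabbit_queue_index:ack([SeqId], Qi3),
    %% flush pushes the journal out; with #pubs == #acks nothing remains
    rabbit_queue_index:flush(Qi4).
%% -------------------------------------------------------------------------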
verify_read_with_published(_Delivered, _Persistent, [], _) -> ok; verify_read_with_published(Delivered, Persistent, - [{Guid, SeqId, _Props, Persistent, Delivered}|Read], - [{SeqId, Guid}|Published]) -> + [{MsgId, SeqId, _Props, Persistent, Delivered}|Read], + [{SeqId, MsgId}|Published]) -> verify_read_with_published(Delivered, Persistent, Read, Published); verify_read_with_published(_Delivered, _Persistent, _Read, _Published) -> ko. @@ -1866,10 +1866,10 @@ verify_read_with_published(_Delivered, _Persistent, _Read, _Published) -> test_queue_index_props() -> with_empty_test_queue( fun(Qi0) -> - Guid = rabbit_guid:guid(), + MsgId = rabbit_guid:guid(), Props = #message_properties{expiry=12345}, - Qi1 = rabbit_queue_index:publish(Guid, 1, Props, true, Qi0), - {[{Guid, 1, Props, _, _}], Qi2} = + Qi1 = rabbit_queue_index:publish(MsgId, 1, Props, true, Qi0), + {[{MsgId, 1, Props, _, _}], Qi2} = rabbit_queue_index:read(1, 2, Qi1), Qi2 end), @@ -1891,19 +1891,19 @@ test_queue_index() -> with_empty_test_queue( fun (Qi0) -> {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0), - {Qi2, SeqIdsGuidsA} = queue_index_publish(SeqIdsA, false, Qi1), + {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1), {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2), {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3), ok = verify_read_with_published(false, false, ReadA, - lists:reverse(SeqIdsGuidsA)), + lists:reverse(SeqIdsMsgIdsA)), %% should get length back as 0, as all the msgs were transient {0, Qi6} = restart_test_queue(Qi4), {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6), - {Qi8, SeqIdsGuidsB} = queue_index_publish(SeqIdsB, true, Qi7), + {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7), {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8), {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9), ok = verify_read_with_published(false, true, ReadB, - lists:reverse(SeqIdsGuidsB)), + lists:reverse(SeqIdsMsgIdsB)), %% should get length back as MostOfASegment LenB = length(SeqIdsB), {LenB, Qi12} = restart_test_queue(Qi10), @@ -1911,7 +1911,7 @@ test_queue_index() -> Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13), {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14), ok = verify_read_with_published(true, true, ReadC, - lists:reverse(SeqIdsGuidsB)), + lists:reverse(SeqIdsMsgIdsB)), Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15), Qi17 = rabbit_queue_index:flush(Qi16), %% Everything will have gone now because #pubs == #acks @@ -1927,12 +1927,12 @@ test_queue_index() -> %% a) partial pub+del+ack, then move to new segment with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsC} = queue_index_publish(SeqIdsC, + {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2), Qi4 = rabbit_queue_index:flush(Qi3), - {Qi5, _SeqIdsGuidsC1} = queue_index_publish([SegmentSize], + {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize], false, Qi4), Qi5 end), @@ -1940,10 +1940,10 @@ test_queue_index() -> %% b) partial pub+del, then move to new segment, then ack all in old segment with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsC2} = queue_index_publish(SeqIdsC, + {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), - {Qi3, _SeqIdsGuidsC3} = queue_index_publish([SegmentSize], + {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize], false, Qi2), Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3), rabbit_queue_index:flush(Qi4) @@ 
-1952,7 +1952,7 @@ test_queue_index() -> %% c) just fill up several segments of all pubs, then +dels, then +acks with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsD} = queue_index_publish(SeqIdsD, + {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), @@ -1986,12 +1986,12 @@ test_queue_index() -> %% exercise journal_minus_segment, not segment_plus_journal. with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsE} = queue_index_publish([0,1,2,4,5,7], + {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7], true, Qi0), Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), Qi3 = rabbit_queue_index:ack([0], Qi2), {5, Qi4} = restart_test_queue(Qi3), - {Qi5, _SeqIdsGuidsF} = queue_index_publish([3,6,8], true, Qi4), + {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4), Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), {5, Qi8} = restart_test_queue(Qi7), -- cgit v1.2.1 From 21525c0ad768914786c92b8a65ccf7baa42b13a6 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 19:32:39 +0000 Subject: cosmetic --- src/file_handle_cache.erl | 6 +-- src/gm.erl | 24 ++++----- src/gm_tests.erl | 14 ++--- src/rabbit_amqqueue.erl | 16 +++--- src/rabbit_amqqueue_process.erl | 22 ++++---- src/rabbit_auth_backend_internal.erl | 4 +- src/rabbit_auth_mechanism_amqplain.erl | 2 +- src/rabbit_basic.erl | 34 ++++++------ src/rabbit_binding.erl | 2 +- src/rabbit_channel.erl | 6 +-- src/rabbit_channel_sup.erl | 12 ++--- src/rabbit_client_sup.erl | 4 +- src/rabbit_direct.erl | 22 ++++---- src/rabbit_event.erl | 2 +- src/rabbit_exchange.erl | 8 +-- src/rabbit_exchange_type_topic.erl | 34 ++++++------ src/rabbit_memory_monitor.erl | 10 ++-- src/rabbit_misc.erl | 2 +- src/rabbit_mnesia.erl | 8 +-- src/rabbit_msg_file.erl | 4 +- src/rabbit_msg_store.erl | 4 +- src/rabbit_networking.erl | 4 +- src/rabbit_node_monitor.erl | 2 +- src/rabbit_prelaunch.erl | 14 ++--- src/rabbit_queue_index.erl | 10 ++-- src/rabbit_ssl.erl | 6 +-- src/rabbit_tests.erl | 56 ++++++++++---------- src/rabbit_types.erl | 96 +++++++++++++++++----------------- src/rabbit_upgrade.erl | 6 +-- src/rabbit_variable_queue.erl | 4 +- src/rabbit_vhost.erl | 18 +++---- 31 files changed, 228 insertions(+), 228 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index f41815d0..855427dd 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -242,7 +242,7 @@ -> val_or_error(ref())). -spec(close/1 :: (ref()) -> ok_or_error()). -spec(read/2 :: (ref(), non_neg_integer()) -> - val_or_error([char()] | binary()) | 'eof'). + val_or_error([char()] | binary()) | 'eof'). -spec(append/2 :: (ref(), iodata()) -> ok_or_error()). -spec(sync/1 :: (ref()) -> ok_or_error()). -spec(position/2 :: (ref(), position()) -> val_or_error(offset())). @@ -252,7 +252,7 @@ -spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())). -spec(flush/1 :: (ref()) -> ok_or_error()). -spec(copy/3 :: (ref(), ref(), non_neg_integer()) -> - val_or_error(non_neg_integer())). + val_or_error(non_neg_integer())). -spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok'). -spec(delete/1 :: (ref()) -> ok_or_error()). -spec(clear/1 :: (ref()) -> ok_or_error()). 
@@ -1117,7 +1117,7 @@ reduce(State = #fhc_state { open_pending = OpenPending, case CStates of [] -> ok; _ -> case (Sum / ClientCount) - - (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of + (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of AverageAge when AverageAge > 0 -> notify_age(CStates, AverageAge); _ -> diff --git a/src/gm.erl b/src/gm.erl index 70633a08..fd8d9b77 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -852,9 +852,9 @@ alive_view_members({_Ver, View}) -> all_known_members({_Ver, View}) -> ?DICT:fold( - fun (Member, #view_member { aliases = Aliases }, Acc) -> - ?SETS:to_list(Aliases) ++ [Member | Acc] - end, [], View). + fun (Member, #view_member { aliases = Aliases }, Acc) -> + ?SETS:to_list(Aliases) ++ [Member | Acc] + end, [], View). group_to_view(#gm_group { members = Members, version = Ver }) -> Alive = lists:filter(fun is_member_alive/1, Members), @@ -1037,15 +1037,15 @@ maybe_erase_aliases(State = #state { self = Self, #view_member { aliases = Aliases } = fetch_view_member(Self, View), {Erasable, MembersState1} = ?SETS:fold( - fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> - #member { last_pub = LP, last_ack = LA } = - find_member_or_blank(Id, MembersState), - case can_erase_view_member(Self, Id, LA, LP) of - true -> {[Id | ErasableAcc], - erase_member(Id, MembersStateAcc)}; - false -> Acc - end - end, {[], MembersState}, Aliases), + fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> + #member { last_pub = LP, last_ack = LA } = + find_member_or_blank(Id, MembersState), + case can_erase_view_member(Self, Id, LA, LP) of + true -> {[Id | ErasableAcc], + erase_member(Id, MembersStateAcc)}; + false -> Acc + end + end, {[], MembersState}, Aliases), State1 = State #state { members_state = MembersState1 }, case Erasable of [] -> {ok, State1}; diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 65e9cff0..ca0ffd64 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -117,13 +117,13 @@ test_broadcast(Fun) -> with_two_members(test_broadcast_fun(Fun)). test_broadcast_fun(Fun) -> - fun (Pid, Pid2) -> - ok = Fun(Pid, magic_message), - passed = receive_or_throw({msg, Pid, Pid, magic_message}, - timeout_waiting_for_msg), - passed = receive_or_throw({msg, Pid2, Pid, magic_message}, - timeout_waiting_for_msg) - end. + fun (Pid, Pid2) -> + ok = Fun(Pid, magic_message), + passed = receive_or_throw({msg, Pid, Pid, magic_message}, + timeout_waiting_for_msg), + passed = receive_or_throw({msg, Pid2, Pid, magic_message}, + timeout_waiting_for_msg) + end. with_two_members(Fun) -> ok = gm:create_tables(), diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 46b78c39..7a996a98 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -52,7 +52,7 @@ -type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}). -type(msg_id() :: non_neg_integer()). -type(ok_or_errors() :: - 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}). + 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}). -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). @@ -100,13 +100,13 @@ -spec(emit_stats/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(delete_immediately/1 :: (rabbit_types:amqqueue()) -> 'ok'). 
-spec(delete/3 :: - (rabbit_types:amqqueue(), 'false', 'false') + (rabbit_types:amqqueue(), 'false', 'false') -> qlen(); - (rabbit_types:amqqueue(), 'true' , 'false') + (rabbit_types:amqqueue(), 'true' , 'false') -> qlen() | rabbit_types:error('in_use'); - (rabbit_types:amqqueue(), 'false', 'true' ) + (rabbit_types:amqqueue(), 'false', 'true' ) -> qlen() | rabbit_types:error('not_empty'); - (rabbit_types:amqqueue(), 'true' , 'true' ) + (rabbit_types:amqqueue(), 'true' , 'true' ) -> qlen() | rabbit_types:error('in_use') | rabbit_types:error('not_empty')). @@ -122,10 +122,10 @@ -spec(notify_down_all/2 :: ([pid()], pid()) -> ok_or_errors()). -spec(limit_all/3 :: ([pid()], pid(), pid() | 'undefined') -> ok_or_errors()). -spec(basic_get/3 :: (rabbit_types:amqqueue(), pid(), boolean()) -> - {'ok', non_neg_integer(), qmsg()} | 'empty'). + {'ok', non_neg_integer(), qmsg()} | 'empty'). -spec(basic_consume/7 :: - (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined', - rabbit_types:ctag(), boolean(), any()) + (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined', + rabbit_types:ctag(), boolean(), any()) -> rabbit_types:ok_or_error('exclusive_consume_unavailable')). -spec(basic_cancel/4 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok'). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..dde87b69 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -485,7 +485,7 @@ attempt_delivery(#delivery{txn = Txn, message = Message}, {NeedsConfirming, State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + backing_queue_state = BQS}}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), {true, NeedsConfirming, @@ -722,10 +722,10 @@ i(Item, _) -> consumers(#q{active_consumers = ActiveConsumers, blocked_consumers = BlockedConsumers}) -> rabbit_misc:queue_fold( - fun ({ChPid, #consumer{tag = ConsumerTag, - ack_required = AckRequired}}, Acc) -> - [{ChPid, ConsumerTag, AckRequired} | Acc] - end, [], queue:join(ActiveConsumers, BlockedConsumers)). + fun ({ChPid, #consumer{tag = ConsumerTag, + ack_required = AckRequired}}, Acc) -> + [{ChPid, ConsumerTag, AckRequired} | Acc] + end, [], queue:join(ActiveConsumers, BlockedConsumers)). emit_stats(State) -> emit_stats(State, []). @@ -906,15 +906,15 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, case is_ch_blocked(C) of true -> State1#q{ blocked_consumers = - add_consumer( - ChPid, Consumer, - State1#q.blocked_consumers)}; + add_consumer( + ChPid, Consumer, + State1#q.blocked_consumers)}; false -> run_message_queue( State1#q{ active_consumers = - add_consumer( - ChPid, Consumer, - State1#q.active_consumers)}) + add_consumer( + ChPid, Consumer, + State1#q.active_consumers)}) end, emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, not NoAck), diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index a564480b..3d005845 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -52,8 +52,8 @@ -spec(clear_admin/1 :: (rabbit_types:username()) -> 'ok'). -spec(list_users/0 :: () -> [{rabbit_types:username(), boolean()}]). -spec(lookup_user/1 :: (rabbit_types:username()) - -> rabbit_types:ok(rabbit_types:internal_user()) - | rabbit_types:error('not_found')). + -> rabbit_types:ok(rabbit_types:internal_user()) + | rabbit_types:error('not_found')). -spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(), regexp()) -> 'ok'). 
-spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) diff --git a/src/rabbit_auth_mechanism_amqplain.erl b/src/rabbit_auth_mechanism_amqplain.erl index 2168495d..b8682a46 100644 --- a/src/rabbit_auth_mechanism_amqplain.erl +++ b/src/rabbit_auth_mechanism_amqplain.erl @@ -54,5 +54,5 @@ handle_response(Response, _State) -> _ -> {protocol_error, "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field", - [LoginTable]} + [LoginTable]} end. diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 57aad808..8c930502 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -44,7 +44,7 @@ -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), rabbit_types:decoded_content()) -> - rabbit_types:ok_or_error2(rabbit_types:message(), any())). + rabbit_types:ok_or_error2(rabbit_types:message(), any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -107,21 +107,21 @@ strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} false -> DecodedContent; {value, Found} -> Headers0 = lists:delete(Found, Headers), rabbit_binary_generator:clear_encoded_content( - DecodedContent#content{ - properties = Props#'P_basic'{ - headers = Headers0}}) + DecodedContent#content{ + properties = Props#'P_basic'{ + headers = Headers0}}) end. message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> try {ok, #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | - header_routes(Props#'P_basic'.headers)]}} + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} catch {error, _Reason} = Error -> Error end. @@ -180,10 +180,10 @@ header_routes(undefined) -> []; header_routes(HeadersTable) -> lists:append( - [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {array, Routes} -> [Route || {longstr, Route} <- Routes]; - undefined -> []; - {Type, _Val} -> throw({error, {unacceptable_type_in_header, - Type, - binary_to_list(HeaderKey)}}) - end || HeaderKey <- ?ROUTING_HEADERS]). + [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + undefined -> []; + {Type, _Val} -> throw({error, {unacceptable_type_in_header, + Type, + binary_to_list(HeaderKey)}}) + end || HeaderKey <- ?ROUTING_HEADERS]). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 96a22dca..7ddb7814 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -70,7 +70,7 @@ rabbit_types:infos()). -spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). -spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). + -> [rabbit_types:infos()]). -spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()). -spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). -spec(remove_for_destination/1 :: diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e92421fc..5fccb542 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -68,9 +68,9 @@ -type(channel_number() :: non_neg_integer()). 
-spec(start_link/9 :: - (channel_number(), pid(), pid(), rabbit_types:protocol(), - rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), - pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> + (channel_number(), pid(), pid(), rabbit_types:protocol(), + rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), + pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). -spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). -spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(), diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 9cc407bc..8175ad80 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -68,12 +68,12 @@ start_link({direct, Channel, ClientChannelPid, Protocol, User, VHost, {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( - SupPid, - {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, Protocol, - User, VHost, Capabilities, Collector, - start_limiter_fun(SupPid)]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), + SupPid, + {channel, {rabbit_channel, start_link, + [Channel, ClientChannelPid, ClientChannelPid, Protocol, + User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl index dbdc6cd4..15e92542 100644 --- a/src/rabbit_client_sup.erl +++ b/src/rabbit_client_sup.erl @@ -29,9 +29,9 @@ -ifdef(use_specs). -spec(start_link/1 :: (mfa()) -> - rabbit_types:ok_pid_or_error()). + rabbit_types:ok_pid_or_error()). -spec(start_link/2 :: ({'local', atom()}, mfa()) -> - rabbit_types:ok_pid_or_error()). + rabbit_types:ok_pid_or_error()). -endif. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 586563f6..a2693c69 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -26,8 +26,8 @@ -spec(boot/0 :: () -> 'ok'). -spec(connect/4 :: (binary(), binary(), binary(), rabbit_types:protocol()) -> - {'ok', {rabbit_types:user(), - rabbit_framing:amqp_table()}}). + {'ok', {rabbit_types:user(), + rabbit_framing:amqp_table()}}). -spec(start_channel/7 :: (rabbit_channel:channel_number(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), @@ -40,12 +40,12 @@ boot() -> {ok, _} = supervisor2:start_child( - rabbit_sup, - {rabbit_direct_client_sup, - {rabbit_client_sup, start_link, - [{local, rabbit_direct_client_sup}, - {rabbit_channel_sup, start_link, []}]}, - transient, infinity, supervisor, [rabbit_client_sup]}), + rabbit_sup, + {rabbit_direct_client_sup, + {rabbit_client_sup, start_link, + [{local, rabbit_direct_client_sup}, + {rabbit_channel_sup, start_link, []}]}, + transient, infinity, supervisor, [rabbit_client_sup]}), ok. %%---------------------------------------------------------------------------- @@ -73,7 +73,7 @@ start_channel(Number, ClientChannelPid, Protocol, User, VHost, Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( - rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}]), + rabbit_direct_client_sup, + [{direct, Number, ClientChannelPid, Protocol, User, VHost, + Capabilities, Collector}]), {ok, ChannelPid}. 
diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 40651d36..9ed532db 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -101,7 +101,7 @@ ensure_stats_timer(State = #state{level = none}, _Fun) -> State; ensure_stats_timer(State = #state{timer = undefined}, Fun) -> {ok, TRef} = timer:apply_after(?STATS_INTERVAL, - erlang, apply, [Fun, []]), + erlang, apply, [Fun, []]), State#state{timer = TRef}; ensure_stats_timer(State, _Fun) -> State. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 92259195..a463e570 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -62,7 +62,7 @@ -> rabbit_types:infos()). -spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). -spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). + -> [rabbit_types:infos()]). -spec(publish/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> {rabbit_router:routing_result(), [pid()]}). -spec(delete/2 :: @@ -266,9 +266,9 @@ process_route(#resource{kind = queue} = QName, call_with_exchange(XName, Fun, PrePostCommitFun) -> rabbit_misc:execute_mnesia_transaction( fun () -> case mnesia:read({rabbit_exchange, XName}) of - [] -> {error, not_found}; - [X] -> Fun(X) - end + [] -> {error, not_found}; + [X] -> Fun(X) + end end, PrePostCommitFun). delete(XName, IfUnused) -> diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2363d05e..f12661d4 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -42,8 +42,8 @@ description() -> route(#exchange{name = X}, #delivery{message = #basic_message{routing_keys = Routes}}) -> lists:append([begin - Words = split_topic_key(RKey), - mnesia:async_dirty(fun trie_match/2, [X, Words]) + Words = split_topic_key(RKey), + mnesia:async_dirty(fun trie_match/2, [X, Words]) end || RKey <- Routes]). validate(_X) -> ok. @@ -51,9 +51,9 @@ create(_Tx, _X) -> ok. recover(_Exchange, Bs) -> rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end). + fun () -> + lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) + end). delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), @@ -166,9 +166,9 @@ trie_child(X, Node, Word) -> trie_bindings(X, Node) -> MatchHead = #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = '$1'}}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + destination = '$1'}}, mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]). trie_add_edge(X, FromNode, ToNode, W) -> @@ -194,9 +194,9 @@ trie_remove_binding(X, Node, D) -> trie_binding_op(X, Node, D, Op) -> ok = Op(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = D}}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + destination = D}}, write). trie_has_any_children(X, Node) -> @@ -209,10 +209,10 @@ trie_has_any_children(X, Node) -> trie_has_any_bindings(X, Node) -> has_any(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - _ = '_'}, - _ = '_'}). + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + _ = '_'}, + _ = '_'}). 
trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, @@ -223,8 +223,8 @@ trie_remove_all_edges(X) -> trie_remove_all_bindings(X) -> remove_all(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, _ = '_'}, - _ = '_'}). + trie_binding = #trie_binding{exchange_name = X, _ = '_'}, + _ = '_'}). has_any(Table, MatchHead) -> Select = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read), diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl index 2f8c940b..996b0a98 100644 --- a/src/rabbit_memory_monitor.erl +++ b/src/rabbit_memory_monitor.erl @@ -111,11 +111,11 @@ stop() -> init([]) -> MemoryLimit = trunc(?MEMORY_LIMIT_SCALING * - (try - vm_memory_monitor:get_memory_limit() - catch - exit:{noproc, _} -> ?MEMORY_SIZE_FOR_DISABLED_VMM - end)), + (try + vm_memory_monitor:get_memory_limit() + catch + exit:{noproc, _} -> ?MEMORY_SIZE_FOR_DISABLED_VMM + end)), {ok, TRef} = timer:apply_interval(?DEFAULT_UPDATE_INTERVAL, ?SERVER, update, []), diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index abc27c5f..5579dbab 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -105,7 +105,7 @@ ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) - -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). + -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). -spec(r/2 :: (rabbit_types:vhost(), K) -> rabbit_types:r3(rabbit_types:vhost(), K, '_') when is_subtype(K, atom())). diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index fc95b77b..99fa6ace 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -129,10 +129,10 @@ empty_ram_only_tables() -> Node = node(), lists:foreach( fun (TabName) -> - case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of - true -> {atomic, ok} = mnesia:clear_table(TabName); - false -> ok - end + case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of + true -> {atomic, ok} = mnesia:clear_table(TabName); + false -> ok + end end, table_names()), ok. diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 55e6ac47..4b97d74c 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -46,8 +46,8 @@ rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, any())). -spec(scan/4 :: (io_device(), file_size(), - fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), - A) -> {'ok', A, position()}). + fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), + A) -> {'ok', A, position()}). -endif. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 9e65e442..d1b8f707 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -75,7 +75,7 @@ successfully_recovered, %% boolean: did we recover state? file_size_limit, %% how big are our files allowed to get? cref_to_guids %% client ref to synced messages mapping - }). + }). -record(client_msstate, { server, @@ -89,7 +89,7 @@ file_summary_ets, dedup_cache_ets, cur_file_cache_ets - }). + }). -record(file_summary, {file, valid_total_size, left, right, file_size, locked, readers}). diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 36f61628..fd545a68 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -67,7 +67,7 @@ -spec(close_connection/2 :: (pid(), string()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). 
-spec(check_tcp_listener_address/2 :: (atom(), listener_config()) - -> [{inet:ip_address(), ip_port(), family(), atom()}]). + -> [{inet:ip_address(), ip_port(), family(), atom()}]). -endif. @@ -98,7 +98,7 @@ boot_ssl() -> verify_peer -> [{verify_fun, fun([]) -> true; ([_|_]) -> false end} - | SslOptsConfig] + | SslOptsConfig] end, [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners], ok diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 817abaa2..ebae48d4 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -76,7 +76,7 @@ handle_cast(_Msg, State) -> handle_info({nodedown, Node}, State) -> rabbit_log:info("node ~p down~n", [Node]), ok = handle_dead_rabbit(Node), - {noreply, State}; + {noreply, State}; handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, State) -> rabbit_log:info("node ~p lost 'rabbit'~n", [Node]), ok = handle_dead_rabbit(Node), diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index d9d92788..7bb8c0ea 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -250,13 +250,13 @@ duplicate_node_check(NodeStr) -> case net_adm:names(NodeHost) of {ok, NamePorts} -> case proplists:is_defined(NodeName, NamePorts) of - true -> io:format("node with name ~p " - "already running on ~p~n", - [NodeName, NodeHost]), - [io:format(Fmt ++ "~n", Args) || - {Fmt, Args} <- rabbit_control:diagnostics(Node)], - terminate(?ERROR_CODE); - false -> ok + true -> io:format("node with name ~p " + "already running on ~p~n", + [NodeName, NodeHost]), + [io:format(Fmt ++ "~n", Args) || + {Fmt, Args} <- rabbit_control:diagnostics(Node)], + terminate(?ERROR_CODE); + false -> ok end; {error, EpmdReason} -> terminate("unexpected epmd error: ~p~n", [EpmdReason]) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76b1136f..bc329947 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -145,8 +145,8 @@ %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * - (?PUBLISH_RECORD_LENGTH_BYTES + - (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). + (?PUBLISH_RECORD_LENGTH_BYTES + + (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). %% ---- misc ---- @@ -177,7 +177,7 @@ path :: file:filename(), journal_entries :: array(), unacked :: non_neg_integer() - })). + })). -type(seq_id() :: integer()). -type(seg_dict() :: {dict(), [segment()]}). -type(on_sync_fun() :: fun ((gb_set()) -> ok)). @@ -188,10 +188,10 @@ max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), unsynced_guids :: [rabbit_guid:guid()] - }). + }). -type(startup_fun_state() :: {fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A}), - A}). + A}). -type(shutdown_terms() :: [any()]). -spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()). diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index e831ee51..1953b6b8 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -87,8 +87,8 @@ cert_info(F, Cert) -> find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} - <- lists:flatten(RDNs), - T == Type] of + <- lists:flatten(RDNs), + T == Type] of [{printableString, S}] -> S; [] -> not_found end. 
@@ -166,7 +166,7 @@ format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; true -> S end; format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, - Min1, Min2, S1, S2, $Z]}) -> + Min1, Min2, S1, S2, $Z]}) -> io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ", [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]); format_asn1_value(V) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..b72b3e49 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -693,23 +693,23 @@ test_topic_matching() -> exchange_op_callback(X, Fun, ExtraArgs) -> rabbit_misc:execute_mnesia_transaction( - fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), + fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), rabbit_exchange:callback(X, Fun, [false, X] ++ ExtraArgs). test_topic_expect_match(X, List) -> lists:foreach( - fun ({Key, Expected}) -> - BinKey = list_to_binary(Key), - Res = rabbit_exchange_type_topic:route( - X, #delivery{message = #basic_message{routing_keys = + fun ({Key, Expected}) -> + BinKey = list_to_binary(Key), + Res = rabbit_exchange_type_topic:route( + X, #delivery{message = #basic_message{routing_keys = [BinKey]}}), - ExpectedRes = lists:map( - fun (Q) -> #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)} - end, Expected), - true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) - end, List). + ExpectedRes = lists:map( + fun (Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + end, List). test_app_management() -> %% starting, stopping, status @@ -818,7 +818,7 @@ test_log_management_during_startup() -> ok = delete_log_handlers([sasl_report_tty_h]), ok = case catch control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotation_tty_no_handlers_test}); + log_rotation_tty_no_handlers_test}); {error, {cannot_log_to_tty, _, _}} -> ok end, @@ -843,8 +843,8 @@ test_log_management_during_startup() -> ok = add_log_handlers([{error_logger_file_h, MainLog}]), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotation_no_write_permission_dir_test}); - {error, {cannot_log_to_file, _, _}} -> ok + log_rotation_no_write_permission_dir_test}); + {error, {cannot_log_to_file, _, _}} -> ok end, %% start application with logging to a subdirectory which @@ -854,9 +854,9 @@ test_log_management_during_startup() -> ok = add_log_handlers([{error_logger_file_h, MainLog}]), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotatation_parent_dirs_test}); + log_rotatation_parent_dirs_test}); {error, {cannot_log_to_file, _, - {error, {cannot_create_parent_dirs, _, eacces}}}} -> ok + {error, {cannot_create_parent_dirs, _, eacces}}}} -> ok end, ok = set_permissions(TmpDir, 8#00700), ok = set_permissions(TmpLog, 8#00600), @@ -1143,7 +1143,7 @@ test_server_status() -> [_|_] = rabbit_binding:list_for_source( rabbit_misc:r(<<"/">>, exchange, <<"">>)), [_] = rabbit_binding:list_for_destination( - rabbit_misc:r(<<"/">>, queue, <<"foo">>)), + rabbit_misc:r(<<"/">>, queue, <<"foo">>)), [_] = rabbit_binding:list_for_source_and_destination( rabbit_misc:r(<<"/">>, exchange, <<"">>), rabbit_misc:r(<<"/">>, queue, <<"foo">>)), @@ -1305,9 +1305,9 @@ test_delegates_async(SecondaryNode) -> make_responder(FMsg) -> make_responder(FMsg, timeout). 
make_responder(FMsg, Throw) -> fun () -> - receive Msg -> FMsg(Msg) - after 1000 -> throw(Throw) - end + receive Msg -> FMsg(Msg) + after 1000 -> throw(Throw) + end end. spawn_responders(Node, Responder, Count) -> @@ -1318,10 +1318,10 @@ await_response(0) -> await_response(Count) -> receive response -> ok, - await_response(Count - 1) + await_response(Count - 1) after 1000 -> - io:format("Async reply not received~n"), - throw(timeout) + io:format("Async reply not received~n"), + throw(timeout) end. must_exit(Fun) -> @@ -1337,7 +1337,7 @@ test_delegates_sync(SecondaryNode) -> BadSender = fun (_Pid) -> exit(exception) end, Responder = make_responder(fun ({'$gen_call', From, invoked}) -> - gen_server:reply(From, response) + gen_server:reply(From, response) end), BadResponder = make_responder(fun ({'$gen_call', From, invoked}) -> @@ -1349,7 +1349,7 @@ test_delegates_sync(SecondaryNode) -> must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end), must_exit(fun () -> - delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), + delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), LocalGoodPids = spawn_responders(node(), Responder, 2), RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2), @@ -1953,7 +1953,7 @@ test_queue_index() -> with_empty_test_queue( fun (Qi0) -> {Qi1, _SeqIdsGuidsD} = queue_index_publish(SeqIdsD, - false, Qi0), + false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), rabbit_queue_index:flush(Qi3) @@ -2195,7 +2195,7 @@ check_variable_queue_status(VQ0, Props) -> variable_queue_wait_for_shuffling_end(VQ) -> case rabbit_variable_queue:needs_idle_timeout(VQ) of true -> variable_queue_wait_for_shuffling_end( - rabbit_variable_queue:idle_timeout(VQ)); + rabbit_variable_queue:idle_timeout(VQ)); false -> VQ end. diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index ab2300c0..a11595e5 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -42,39 +42,39 @@ %% TODO: make this more precise by tying specific class_ids to %% specific properties -type(undecoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: 'none', - properties_bin :: binary(), - payload_fragments_rev :: [binary()]} | - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: rabbit_framing:amqp_property_record(), - properties_bin :: 'none', - payload_fragments_rev :: [binary()]}). + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: 'none', + properties_bin :: binary(), + payload_fragments_rev :: [binary()]} | + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: 'none', + payload_fragments_rev :: [binary()]}). -type(unencoded_content() :: undecoded_content()). -type(decoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: rabbit_framing:amqp_property_record(), - properties_bin :: maybe(binary()), - payload_fragments_rev :: [binary()]}). + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: maybe(binary()), + payload_fragments_rev :: [binary()]}). -type(encoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: maybe(rabbit_framing:amqp_property_record()), - properties_bin :: binary(), - payload_fragments_rev :: [binary()]}). 
+ #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: maybe(rabbit_framing:amqp_property_record()), + properties_bin :: binary(), + payload_fragments_rev :: [binary()]}). -type(content() :: undecoded_content() | decoded_content()). -type(basic_message() :: - #basic_message{exchange_name :: rabbit_exchange:name(), - routing_keys :: [rabbit_router:routing_key()], - content :: content(), - guid :: rabbit_guid:guid(), - is_persistent :: boolean()}). + #basic_message{exchange_name :: rabbit_exchange:name(), + routing_keys :: [rabbit_router:routing_key()], + content :: content(), + guid :: rabbit_guid:guid(), + is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: - #delivery{mandatory :: boolean(), - immediate :: boolean(), - txn :: maybe(txn()), - sender :: pid(), - message :: message()}). + #delivery{mandatory :: boolean(), + immediate :: boolean(), + txn :: maybe(txn()), + sender :: pid(), + message :: message()}). -type(message_properties() :: #message_properties{expiry :: pos_integer() | 'undefined', needs_confirming :: boolean()}). @@ -89,9 +89,9 @@ -type(infos() :: [info()]). -type(amqp_error() :: - #amqp_error{name :: rabbit_framing:amqp_exception(), - explanation :: string(), - method :: rabbit_framing:amqp_method_name()}). + #amqp_error{name :: rabbit_framing:amqp_exception(), + explanation :: string(), + method :: rabbit_framing:amqp_method_name()}). -type(r(Kind) :: r2(vhost(), Kind)). @@ -103,34 +103,34 @@ name :: Name}). -type(listener() :: - #listener{node :: node(), - protocol :: atom(), - host :: rabbit_networking:hostname(), - port :: rabbit_networking:ip_port()}). + #listener{node :: node(), + protocol :: atom(), + host :: rabbit_networking:hostname(), + port :: rabbit_networking:ip_port()}). -type(binding_source() :: rabbit_exchange:name()). -type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()). -type(binding() :: - #binding{source :: rabbit_exchange:name(), - destination :: binding_destination(), - key :: rabbit_binding:key(), - args :: rabbit_framing:amqp_table()}). + #binding{source :: rabbit_exchange:name(), + destination :: binding_destination(), + key :: rabbit_binding:key(), + args :: rabbit_framing:amqp_table()}). -type(amqqueue() :: - #amqqueue{name :: rabbit_amqqueue:name(), - durable :: boolean(), - auto_delete :: boolean(), - exclusive_owner :: rabbit_types:maybe(pid()), - arguments :: rabbit_framing:amqp_table(), - pid :: rabbit_types:maybe(pid())}). + #amqqueue{name :: rabbit_amqqueue:name(), + durable :: boolean(), + auto_delete :: boolean(), + exclusive_owner :: rabbit_types:maybe(pid()), + arguments :: rabbit_framing:amqp_table(), + pid :: rabbit_types:maybe(pid())}). -type(exchange() :: - #exchange{name :: rabbit_exchange:name(), - type :: rabbit_exchange:type(), - durable :: boolean(), - auto_delete :: boolean(), - arguments :: rabbit_framing:amqp_table()}). + #exchange{name :: rabbit_exchange:name(), + type :: rabbit_exchange:type(), + durable :: boolean(), + auto_delete :: boolean(), + arguments :: rabbit_framing:amqp_table()}). -type(connection() :: pid()). diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 89acc10c..ebda5d03 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -106,9 +106,9 @@ upgrades_to_apply(Heads, G) -> %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. 
Unsorted = sets:to_list( - sets:subtract( - sets:from_list(digraph:vertices(G)), - sets:from_list(digraph_utils:reaching(Heads, G)))), + sets:subtract( + sets:from_list(digraph:vertices(G)), + sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. [element(2, digraph:vertex(G, StepName)) || diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..6a461a77 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -268,13 +268,13 @@ msg_on_disk, index_on_disk, msg_props - }). + }). -record(delta, { start_seq_id, %% start_seq_id is inclusive count, end_seq_id %% end_seq_id is exclusive - }). + }). -record(tx, { pending_messages, pending_acks }). diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index efebef06..24c130ed 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -48,15 +48,15 @@ add(VHostPath) -> ok; (ok, false) -> [rabbit_exchange:declare( - rabbit_misc:r(VHostPath, exchange, Name), - Type, true, false, false, []) || - {Name,Type} <- - [{<<"">>, direct}, - {<<"amq.direct">>, direct}, - {<<"amq.topic">>, topic}, - {<<"amq.match">>, headers}, %% per 0-9-1 pdf - {<<"amq.headers">>, headers}, %% per 0-9-1 xml - {<<"amq.fanout">>, fanout}]], + rabbit_misc:r(VHostPath, exchange, Name), + Type, true, false, false, []) || + {Name,Type} <- + [{<<"">>, direct}, + {<<"amq.direct">>, direct}, + {<<"amq.topic">>, topic}, + {<<"amq.match">>, headers}, %% per 0-9-1 pdf + {<<"amq.headers">>, headers}, %% per 0-9-1 xml + {<<"amq.fanout">>, fanout}]], ok end), rabbit_log:info("Added vhost ~p~n", [VHostPath]), -- cgit v1.2.1 From e80b3162f252dcda613583ef2e6b271b0c5c4deb Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 19:49:17 +0000 Subject: cosmetic - comment indentation --- src/gen_server2.erl | 4 +- src/rabbit.erl | 2 +- src/rabbit_amqqueue_process.erl | 4 +- src/rabbit_basic.erl | 2 +- src/rabbit_binary_generator.erl | 13 +-- src/rabbit_mnesia.erl | 14 +-- src/rabbit_networking.erl | 4 +- src/rabbit_reader.erl | 4 +- src/rabbit_tests.erl | 234 ++++++++++++++++++++-------------------- src/rabbit_writer.erl | 10 +- 10 files changed, 146 insertions(+), 145 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 94296f97..43e0a8f5 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -453,8 +453,8 @@ unregister_name({global,Name}) -> _ = global:unregister_name(Name); unregister_name(Pid) when is_pid(Pid) -> Pid; -% Under R12 let's just ignore it, as we have a single term as Name. -% On R13 it will never get here, as we get tuple with 'local/global' atom. +%% Under R12 let's just ignore it, as we have a single term as Name. +%% On R13 it will never get here, as we get tuple with 'local/global' atom. unregister_name(_Name) -> ok. extend_backoff(undefined) -> diff --git a/src/rabbit.erl b/src/rabbit.erl index 6eb59c3e..c9a929ae 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -375,7 +375,7 @@ config_files() -> error -> [] end. 
-%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- print_banner() -> {ok, Product} = application:get_key(id), diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index dde87b69..7719dfe7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -33,7 +33,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). -% Queue's state +%% Queue's state -record(q, {q, exclusive_consumer, has_had_consumers, @@ -747,7 +747,7 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> {channel, ChPid}, {queue, self()}]). -%--------------------------------------------------------------------------- +%%---------------------------------------------------------------------------- prioritise_call(Msg, _From, _State) -> case Msg of diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 8c930502..f9a8ee1d 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -175,7 +175,7 @@ is_message_persistent(#content{properties = #'P_basic'{ Other -> throw({error, {delivery_mode_unknown, Other}}) end. -% Extract CC routes from headers +%% Extract CC routes from headers header_routes(undefined) -> []; header_routes(HeadersTable) -> diff --git a/src/rabbit_binary_generator.erl b/src/rabbit_binary_generator.erl index dc81ace6..68511a32 100644 --- a/src/rabbit_binary_generator.erl +++ b/src/rabbit_binary_generator.erl @@ -18,12 +18,13 @@ -include("rabbit_framing.hrl"). -include("rabbit.hrl"). -% EMPTY_CONTENT_BODY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1 -% - 1 byte of frame type -% - 2 bytes of channel number -% - 4 bytes of frame payload length -% - 1 byte of payload trailer FRAME_END byte -% See definition of check_empty_content_body_frame_size/0, an assertion called at startup. +%% EMPTY_CONTENT_BODY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1 +%% - 1 byte of frame type +%% - 2 bytes of channel number +%% - 4 bytes of frame payload length +%% - 1 byte of payload trailer FRAME_END byte +%% See definition of check_empty_content_body_frame_size/0, +%% an assertion called at startup. -define(EMPTY_CONTENT_BODY_FRAME_SIZE, 8). 
-export([build_simple_method_frame/3, diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 99fa6ace..66436920 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -519,13 +519,13 @@ create_local_table_copies(Type) -> HasDiscOnlyCopies -> disc_only_copies; true -> ram_copies end; -%% unused code - commented out to keep dialyzer happy -%% Type =:= disc_only -> -%% if -%% HasDiscCopies or HasDiscOnlyCopies -> -%% disc_only_copies; -%% true -> ram_copies -%% end; +%%% unused code - commented out to keep dialyzer happy +%%% Type =:= disc_only -> +%%% if +%%% HasDiscCopies or HasDiscOnlyCopies -> +%%% disc_only_copies; +%%% true -> ram_copies +%%% end; Type =:= ram -> ram_copies end, diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index fd545a68..877d2cf7 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -90,8 +90,8 @@ boot_ssl() -> {ok, SslListeners} -> ok = rabbit_misc:start_applications([crypto, public_key, ssl]), {ok, SslOptsConfig} = application:get_env(ssl_options), - % unknown_ca errors are silently ignored prior to R14B unless we - % supply this verify_fun - remove when at least R14B is required + %% unknown_ca errors are silently ignored prior to R14B unless we + %% supply this verify_fun - remove when at least R14B is required SslOpts = case proplists:get_value(verify, SslOptsConfig, verify_none) of verify_none -> SslOptsConfig; diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index b172db56..f9a3d9c7 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -37,7 +37,7 @@ -define(SILENT_CLOSE_DELAY, 3). -define(FRAME_MAX, 131072). %% set to zero once QPid fix their negotiation -%--------------------------------------------------------------------------- +%%-------------------------------------------------------------------------- -record(v1, {parent, sock, connection, callback, recv_length, recv_ref, connection_state, queue_collector, heartbeater, stats_timer, @@ -62,7 +62,7 @@ State#v1.connection_state =:= blocking orelse State#v1.connection_state =:= blocked)). -%%---------------------------------------------------------------------------- +%%-------------------------------------------------------------------------- -ifdef(use_specs). 
diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b72b3e49..88b58166 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -425,35 +425,35 @@ test_content_properties() -> [{<<"one">>, signedint, 1}, {<<"two">>, signedint, 2}]}]}], << - % property-flags - 16#8000:16, + %% property-flags + 16#8000:16, - % property-list: + %% property-list: - % table - 117:32, % table length in bytes + %% table + 117:32, % table length in bytes - 11,"a signedint", % name - "I",12345678:32, % type and value + 11,"a signedint", % name + "I",12345678:32, % type and value - 9,"a longstr", - "S",10:32,"yes please", + 9,"a longstr", + "S",10:32,"yes please", - 9,"a decimal", - "D",123,12345678:32, + 9,"a decimal", + "D",123,12345678:32, - 11,"a timestamp", - "T", 123456789012345:64, + 11,"a timestamp", + "T", 123456789012345:64, - 14,"a nested table", - "F", - 18:32, + 14,"a nested table", + "F", + 18:32, - 3,"one", - "I",1:32, + 3,"one", + "I",1:32, - 3,"two", - "I",2:32 >>), + 3,"two", + "I",2:32 >>), case catch rabbit_binary_parser:parse_properties([bit, bit, bit, bit], <<16#A0,0,1>>) of {'EXIT', content_properties_binary_overflow} -> passed; V -> exit({got_success_but_expected_failure, V}) @@ -480,28 +480,28 @@ test_field_values() -> ]}], << - % property-flags - 16#8000:16, - % table length in bytes - 228:32, - - 7,"longstr", "S", 21:32, "Here is a long string", % = 34 - 9,"signedint", "I", 12345:32/signed, % + 15 = 49 - 7,"decimal", "D", 3, 123456:32, % + 14 = 63 - 9,"timestamp", "T", 109876543209876:64, % + 19 = 82 - 5,"table", "F", 31:32, % length of table % + 11 = 93 - 3,"one", "I", 54321:32, % + 9 = 102 - 3,"two", "S", 13:32, "A long string",% + 22 = 124 - 4,"byte", "b", 255:8, % + 7 = 131 - 4,"long", "l", 1234567890:64, % + 14 = 145 - 5,"short", "s", 655:16, % + 9 = 154 - 4,"bool", "t", 1, % + 7 = 161 - 6,"binary", "x", 15:32, "a binary string", % + 27 = 188 - 4,"void", "V", % + 6 = 194 - 5,"array", "A", 23:32, % + 11 = 205 - "I", 54321:32, % + 5 = 210 - "S", 13:32, "A long string" % + 18 = 228 - >>), + %% property-flags + 16#8000:16, + %% table length in bytes + 228:32, + + 7,"longstr", "S", 21:32, "Here is a long string", % = 34 + 9,"signedint", "I", 12345:32/signed, % + 15 = 49 + 7,"decimal", "D", 3, 123456:32, % + 14 = 63 + 9,"timestamp", "T", 109876543209876:64, % + 19 = 82 + 5,"table", "F", 31:32, % length of table % + 11 = 93 + 3,"one", "I", 54321:32, % + 9 = 102 + 3,"two", "S", 13:32, "A long string", % + 22 = 124 + 4,"byte", "b", 255:8, % + 7 = 131 + 4,"long", "l", 1234567890:64, % + 14 = 145 + 5,"short", "s", 655:16, % + 9 = 154 + 4,"bool", "t", 1, % + 7 = 161 + 6,"binary", "x", 15:32, "a binary string", % + 27 = 188 + 4,"void", "V", % + 6 = 194 + 5,"array", "A", 23:32, % + 11 = 205 + "I", 54321:32, % + 5 = 210 + "S", 13:32, "A long string" % + 18 = 228 + >>), passed. 
%% Test that content frames don't exceed frame-max @@ -598,65 +598,65 @@ test_topic_matching() -> %% add some bindings Bindings = lists:map( - fun ({Key, Q}) -> - #binding{source = XName, - key = list_to_binary(Key), - destination = #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)}} - end, [{"a.b.c", "t1"}, - {"a.*.c", "t2"}, - {"a.#.b", "t3"}, - {"a.b.b.c", "t4"}, - {"#", "t5"}, - {"#.#", "t6"}, - {"#.b", "t7"}, - {"*.*", "t8"}, - {"a.*", "t9"}, - {"*.b.c", "t10"}, - {"a.#", "t11"}, - {"a.#.#", "t12"}, - {"b.b.c", "t13"}, - {"a.b.b", "t14"}, - {"a.b", "t15"}, - {"b.c", "t16"}, - {"", "t17"}, - {"*.*.*", "t18"}, - {"vodka.martini", "t19"}, - {"a.b.c", "t20"}, - {"*.#", "t21"}, - {"#.*.#", "t22"}, - {"*.#.#", "t23"}, - {"#.#.#", "t24"}, - {"*", "t25"}, - {"#.b.#", "t26"}]), + fun ({Key, Q}) -> + #binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} + end, [{"a.b.c", "t1"}, + {"a.*.c", "t2"}, + {"a.#.b", "t3"}, + {"a.b.b.c", "t4"}, + {"#", "t5"}, + {"#.#", "t6"}, + {"#.b", "t7"}, + {"*.*", "t8"}, + {"a.*", "t9"}, + {"*.b.c", "t10"}, + {"a.#", "t11"}, + {"a.#.#", "t12"}, + {"b.b.c", "t13"}, + {"a.b.b", "t14"}, + {"a.b", "t15"}, + {"b.c", "t16"}, + {"", "t17"}, + {"*.*.*", "t18"}, + {"vodka.martini", "t19"}, + {"a.b.c", "t20"}, + {"*.#", "t21"}, + {"#.*.#", "t22"}, + {"*.#.#", "t23"}, + {"#.#.#", "t24"}, + {"*", "t25"}, + {"#.b.#", "t26"}]), lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), %% test some matches - test_topic_expect_match(X, - [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", - "t18", "t20", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", - "t12", "t15", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", - "t18", "t21", "t22", "t23", "t24", "t26"]}, - {"", ["t5", "t6", "t17", "t24"]}, - {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", - "t24"]}, - {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", - "t24"]}, - {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", - "t24"]}, - {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", - "t23", "t24", "t26"]}, - {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, - {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", - "t25"]}]), + test_topic_expect_match( + X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", + "t18", "t20", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", + "t12", "t15", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", + "t18", "t21", "t22", "t23", "t24", "t26"]}, + {"", ["t5", "t6", "t17", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", + "t24", "t26"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", + "t23", "t24"]}, + {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", + "t24"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", + "t24"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", + "t22", "t23", "t24", "t26"]}, + {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, + {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", + "t25"]}]), %% remove some bindings RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), @@ -669,21 +669,21 @@ test_topic_matching() -> %% test some 
matches test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", - "t23", "t24", "t26"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", - "t22", "t23", "t24", "t26"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", - "t23", "t24", "t26"]}, - {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, - {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", - "t24", "t26"]}, - {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, - {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange exchange_op_callback(X, delete, [RemainingBindings]), @@ -876,22 +876,22 @@ test_log_management_during_startup() -> passed. test_option_parser() -> - % command and arguments should just pass through + %% command and arguments should just pass through ok = check_get_options({["mock_command", "arg1", "arg2"], []}, [], ["mock_command", "arg1", "arg2"]), - % get flags + %% get flags ok = check_get_options( {["mock_command", "arg1"], [{"-f", true}, {"-f2", false}]}, [{flag, "-f"}, {flag, "-f2"}], ["mock_command", "arg1", "-f"]), - % get options + %% get options ok = check_get_options( {["mock_command"], [{"-foo", "bar"}, {"-baz", "notbaz"}]}, [{option, "-foo", "notfoo"}, {option, "-baz", "notbaz"}], ["mock_command", "-foo", "bar"]), - % shuffled and interleaved arguments and options + %% shuffled and interleaved arguments and options ok = check_get_options( {["a1", "a2", "a3"], [{"-o1", "hello"}, {"-o2", "noto2"}, {"-f", true}]}, [{option, "-o1", "noto1"}, {flag, "-f"}, {option, "-o2", "noto2"}], @@ -1438,7 +1438,7 @@ test_declare_on_dead_queue(SecondaryNode) -> throw(failed_to_create_and_kill_queue) end. -%--------------------------------------------------------------------- +%%--------------------------------------------------------------------- control_action(Command, Args) -> control_action(Command, node(), Args, default_options()). diff --git a/src/rabbit_writer.erl b/src/rabbit_writer.erl index eba86a55..ac3434d2 100644 --- a/src/rabbit_writer.erl +++ b/src/rabbit_writer.erl @@ -28,7 +28,7 @@ -define(HIBERNATE_AFTER, 5000). -%%---------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- -ifdef(use_specs). @@ -69,7 +69,7 @@ -endif. 
-%%---------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- start(Sock, Channel, FrameMax, Protocol, ReaderPid) -> {ok, @@ -133,7 +133,7 @@ handle_message({inet_reply, _, Status}, _State) -> handle_message(Message, _State) -> exit({writer, message_not_understood, Message}). -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- send_command(W, MethodRecord) -> W ! {send_command, MethodRecord}, @@ -157,13 +157,13 @@ send_command_and_notify(W, Q, ChPid, MethodRecord, Content) -> W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content}, ok. -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- call(Pid, Msg) -> {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity), Res. -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- assemble_frame(Channel, MethodRecord, Protocol) -> ?LOGMESSAGE(out, Channel, MethodRecord, none), -- cgit v1.2.1 From c8044c53b6a8eed5b685ff263b4ffbcba37a98c7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 20:31:09 +0000 Subject: cosmetic --- src/rabbit_amqqueue.erl | 6 ++--- src/rabbit_channel.erl | 14 +++++------ src/rabbit_control.erl | 46 +++++++++++++++++------------------- src/rabbit_misc.erl | 22 +++++++++--------- src/rabbit_msg_file.erl | 50 +++++++++++++++++++-------------------- src/rabbit_msg_store.erl | 54 +++++++++++++++++++++---------------------- src/rabbit_queue_index.erl | 18 +++++++-------- src/rabbit_reader.erl | 19 ++++++++------- src/rabbit_router.erl | 6 ++--- src/rabbit_variable_queue.erl | 33 +++++++++++++------------- 10 files changed, 131 insertions(+), 137 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 7a996a98..8e4ca8e3 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -214,8 +214,8 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> [] -> ok = store_queue(Q), B = add_default_binding(Q), fun (Tx) -> B(Tx), Q end; - [_] -> %% Q exists on stopped node - rabbit_misc:const(not_found) + %% Q exists on stopped node + [_] -> rabbit_misc:const(not_found) end; [ExistingQ = #amqqueue{pid = QPid}] -> case rabbit_misc:is_process_alive(QPid) of @@ -288,7 +288,7 @@ with_exclusive_access_or_die(Name, ReaderPid, F) -> fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end). assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, - RequiredArgs) -> + RequiredArgs) -> rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName, [<<"x-expires">>]). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5fccb542..526fb428 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -301,8 +301,8 @@ handle_info({'DOWN', _MRef, process, QPid, Reason}, {MXs, State2} = process_confirms(MsgSeqNos, QPid, State1), erase_queue_stats(QPid), State3 = (case Reason of - normal -> fun record_confirms/2; - _ -> fun send_nacks/2 + normal -> fun record_confirms/2; + _ -> fun send_nacks/2 end)(MXs, State2), noreply(queue_blocked(QPid, State3)). 
@@ -715,9 +715,9 @@ handle_method(#'basic.consume'{queue = QueueNameBin, end) of ok -> {noreply, State#ch{consumer_mapping = - dict:store(ActualConsumerTag, - QueueName, - ConsumerMapping)}}; + dict:store(ActualConsumerTag, + QueueName, + ConsumerMapping)}}; {error, exclusive_consume_unavailable} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -739,8 +739,8 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, return_ok(State, NoWait, OkMsg); {ok, QueueName} -> NewState = State#ch{consumer_mapping = - dict:erase(ConsumerTag, - ConsumerMapping)}, + dict:erase(ConsumerTag, + ConsumerMapping)}, case rabbit_amqqueue:with( QueueName, fun (Q) -> diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 746bb66e..8364ecd8 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -103,24 +103,22 @@ print_badrpc_diagnostics(Node) -> diagnostics(Node) -> {_NodeName, NodeHost} = rabbit_misc:nodeparts(Node), - [ - {"diagnostics:", []}, - case net_adm:names(NodeHost) of - {error, EpmdReason} -> - {"- unable to connect to epmd on ~s: ~w", - [NodeHost, EpmdReason]}; - {ok, NamePorts} -> - {"- nodes and their ports on ~s: ~p", - [NodeHost, [{list_to_atom(Name), Port} || - {Name, Port} <- NamePorts]]} - end, - {"- current node: ~w", [node()]}, - case init:get_argument(home) of - {ok, [[Home]]} -> {"- current node home dir: ~s", [Home]}; - Other -> {"- no current node home dir: ~p", [Other]} - end, - {"- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]} - ]. + [{"diagnostics:", []}, + case net_adm:names(NodeHost) of + {error, EpmdReason} -> + {"- unable to connect to epmd on ~s: ~w", + [NodeHost, EpmdReason]}; + {ok, NamePorts} -> + {"- nodes and their ports on ~s: ~p", + [NodeHost, [{list_to_atom(Name), Port} || + {Name, Port} <- NamePorts]]} + end, + {"- current node: ~w", [node()]}, + case init:get_argument(home) of + {ok, [[Home]]} -> {"- current node home dir: ~s", [Home]}; + Other -> {"- no current node home dir: ~p", [Other]} + end, + {"- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]}]. stop() -> ok. @@ -152,13 +150,13 @@ action(force_reset, Node, [], _Opts, Inform) -> action(cluster, Node, ClusterNodeSs, _Opts, Inform) -> ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), Inform("Clustering node ~p with ~p", - [Node, ClusterNodes]), + [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, cluster, [ClusterNodes]); action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), Inform("Forcefully clustering node ~p with ~p (ignoring offline nodes)", - [Node, ClusterNodes]), + [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]); action(status, Node, [], _Opts, Inform) -> @@ -320,10 +318,8 @@ wait_for_application0(Node, Attempts) -> wait_for_application(Node, Attempts). default_if_empty(List, Default) when is_list(List) -> - if List == [] -> - Default; - true -> - [list_to_atom(X) || X <- List] + if List == [] -> Default; + true -> [list_to_atom(X) || X <- List] end. display_info_list(Results, InfoItemKeys) when is_list(Results) -> @@ -414,7 +410,7 @@ prettify_typed_amqp_value(Type, Value) -> _ -> Value end. 
-% the slower shutdown on windows required to flush stdout +%% the slower shutdown on windows required to flush stdout quit(Status) -> case os:type() of {unix, _} -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 5579dbab..e79a58a1 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -469,11 +469,11 @@ map_in_order(F, L) -> table_fold(F, Acc0, TableName) -> lists:foldl( fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, read) of - [] -> Acc; - _ -> F(E, Acc) - end - end) + fun () -> case mnesia:match_object(TableName, E, read) of + [] -> Acc; + _ -> F(E, Acc) + end + end) end, Acc0, dirty_read_all(TableName)). dirty_read_all(TableName) -> @@ -755,12 +755,12 @@ unlink_and_capture_exit(Pid) -> after 0 -> ok end. -% Separate flags and options from arguments. -% get_options([{flag, "-q"}, {option, "-p", "/"}], -% ["set_permissions","-p","/","guest", -% "-q",".*",".*",".*"]) -% == {["set_permissions","guest",".*",".*",".*"], -% [{"-q",true},{"-p","/"}]} +%% Separate flags and options from arguments. +%% get_options([{flag, "-q"}, {option, "-p", "/"}], +%% ["set_permissions","-p","/","guest", +%% "-q",".*",".*",".*"]) +%% == {["set_permissions","guest",".*",".*",".*"], +%% [{"-q",true},{"-p","/"}]} get_options(Defs, As) -> lists:foldl(fun(Def, {AsIn, RsIn}) -> {AsOut, Value} = case Def of diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 4b97d74c..ea7cf80c 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -60,9 +60,9 @@ append(FileHdl, Guid, MsgBody) Size = MsgBodyBinSize + ?GUID_SIZE_BYTES, case file_handle_cache:append(FileHdl, <>) of + Guid:?GUID_SIZE_BYTES/binary, + MsgBodyBin:MsgBodyBinSize/binary, + ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT}; KO -> KO end. @@ -72,9 +72,9 @@ read(FileHdl, TotalSize) -> BodyBinSize = Size - ?GUID_SIZE_BYTES, case file_handle_cache:read(FileHdl, TotalSize) of {ok, <>} -> + Guid:?GUID_SIZE_BYTES/binary, + MsgBodyBin:BodyBinSize/binary, + ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} -> {ok, {Guid, binary_to_term(MsgBodyBin)}}; KO -> KO end. @@ -97,26 +97,26 @@ scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) -> end. scanner(<<>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; + {<<>>, Acc, Offset}; scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; %% Nothing to do other than stop. + {<<>>, Acc, Offset}; %% Nothing to do other than stop. scanner(<>, Offset, Fun, Acc) -> - TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, - case WriteMarker of - ?WRITE_OK_MARKER -> - %% Here we take option 5 from - %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the Guid as a number, and then convert it - %% back to a binary in order to work around bugs in - %% Erlang's GC. - <> = - <>, - <> = <>, - scanner(Rest, Offset + TotalSize, Fun, - Fun({Guid, TotalSize, Offset, Msg}, Acc)); - _ -> - scanner(Rest, Offset + TotalSize, Fun, Acc) - end; + TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, + case WriteMarker of + ?WRITE_OK_MARKER -> + %% Here we take option 5 from + %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in + %% which we read the Guid as a number, and then convert it + %% back to a binary in order to work around bugs in + %% Erlang's GC. 
+ <> = + <>, + <> = <>, + scanner(Rest, Offset + TotalSize, Fun, + Fun({Guid, TotalSize, Offset, Msg}, Acc)); + _ -> + scanner(Rest, Offset + TotalSize, Fun, Acc) + end; scanner(Data, Offset, _Fun, Acc) -> - {Data, Acc, Offset}. + {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index d1b8f707..8e1b2ac4 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -549,7 +549,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% GC ends, we +1 readers, msg_store ets:deletes (and %% unlocks the dest) try Release(), - Defer() + Defer() catch error:badarg -> read(Guid, CState) end; [#file_summary { locked = false }] -> @@ -667,7 +667,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> successfully_recovered = CleanShutdown, file_size_limit = FileSizeLimit, cref_to_guids = dict:new() - }, + }, %% If we didn't recover the msg location index then we need to %% rebuild it now. @@ -1256,7 +1256,7 @@ safe_file_delete(File, Dir, FileHandlesEts) -> close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts, client_ref = Ref } = - CState) -> + CState) -> Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}), {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) -> true = ets:delete(FileHandlesEts, Key), @@ -1465,7 +1465,7 @@ recover_file_summary(true, Dir) -> Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME), case ets:file2tab(Path) of {ok, Tid} -> file:delete(Path), - {true, Tid}; + {true, Tid}; {error, _Error} -> recover_file_summary(false, Dir) end. @@ -1530,7 +1530,7 @@ scan_file_for_valid_messages(Dir, FileName) -> {ok, Hdl} -> Valid = rabbit_msg_file:scan( Hdl, filelib:file_size( form_filename(Dir, FileName)), - fun scan_fun/2, []), + fun scan_fun/2, []), %% if something really bad has happened, %% the close could fail, but ignore file_handle_cache:close(Hdl), @@ -1693,8 +1693,8 @@ maybe_compact(State = #msstate { sum_valid_data = SumValid, pending_gc_completion = Pending, file_summary_ets = FileSummaryEts, file_size_limit = FileSizeLimit }) - when (SumFileSize > 2 * FileSizeLimit andalso - (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION) -> + when SumFileSize > 2 * FileSizeLimit andalso + (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION -> %% TODO: the algorithm here is sub-optimal - it may result in a %% complete traversal of FileSummaryEts. case ets:first(FileSummaryEts) of @@ -1757,10 +1757,10 @@ delete_file_if_empty(File, State = #msstate { locked = false }] = ets:lookup(FileSummaryEts, File), case ValidData of - 0 -> %% don't delete the file_summary_ets entry for File here - %% because we could have readers which need to be able to - %% decrement the readers count. - true = ets:update_element(FileSummaryEts, File, + %% don't delete the file_summary_ets entry for File here + %% because we could have readers which need to be able to + %% decrement the readers count. 
+ 0 -> true = ets:update_element(FileSummaryEts, File, {#file_summary.locked, true}), ok = rabbit_msg_store_gc:delete(GCPid, File), Pending1 = orddict_store(File, [], Pending), @@ -1813,17 +1813,17 @@ combine_files(Source, Destination, dir = Dir, msg_store = Server }) -> [#file_summary { - readers = 0, - left = Destination, - valid_total_size = SourceValid, - file_size = SourceFileSize, - locked = true }] = ets:lookup(FileSummaryEts, Source), + readers = 0, + left = Destination, + valid_total_size = SourceValid, + file_size = SourceFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Source), [#file_summary { - readers = 0, - right = Source, - valid_total_size = DestinationValid, - file_size = DestinationFileSize, - locked = true }] = ets:lookup(FileSummaryEts, Destination), + readers = 0, + right = Source, + valid_total_size = DestinationValid, + file_size = DestinationFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Destination), SourceName = filenum_to_name(Source), DestinationName = filenum_to_name(Destination), @@ -2001,12 +2001,12 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> ?HANDLE_CACHE_BUFFER_SIZE}]), {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( - RefOld, filelib:file_size(FileOld), - fun({Guid, _Size, _Offset, BinMsg}, ok) -> - {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), - ok - end, ok), + RefOld, filelib:file_size(FileOld), + fun({Guid, _Size, _Offset, BinMsg}, ok) -> + {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok + end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), ok. diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index bc329947..00f5a752 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -272,7 +272,7 @@ publish(Guid, SeqId, MsgProps, IsPersistent, false -> ?PUB_TRANS_JPREFIX end):?JPREFIX_BITS, SeqId:?SEQ_BITS>>, - create_pub_record_body(Guid, MsgProps)]), + create_pub_record_body(Guid, MsgProps)]), maybe_flush_journal( add_to_journal(SeqId, {Guid, MsgProps, IsPersistent}, State1)). @@ -666,8 +666,8 @@ recover_journal(State) -> journal_minus_segment(JEntries, SegEntries), Segment #segment { journal_entries = JEntries1, unacked = (UnackedCountInJournal + - UnackedCountInSeg - - UnackedCountDuplicates) } + UnackedCountInSeg - + UnackedCountDuplicates) } end, Segments), State1 #qistate { segments = Segments1 }. 
@@ -799,16 +799,16 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> {Guid, MsgProps, IsPersistent} -> file_handle_cache:append( Hdl, [<>, - create_pub_record_body(Guid, MsgProps)]) + (bool_to_int(IsPersistent)):1, + RelSeq:?REL_SEQ_BITS>>, + create_pub_record_body(Guid, MsgProps)]) end, ok = case {Del, Ack} of {no_del, no_ack} -> ok; _ -> Binary = <>, + RelSeq:?REL_SEQ_BITS>>, file_handle_cache:append( Hdl, case {Del, Ack} of {del, ack} -> [Binary, Binary]; @@ -853,14 +853,14 @@ load_segment(KeepAcked, #segment { path = Path }) -> load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of {ok, <>} -> + IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>} -> {Guid, MsgProps} = read_pub_record_body(Hdl), Obj = {{Guid, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), load_segment_entries(KeepAcked, Hdl, SegEntries1, UnackedCount + 1); {ok, <>} -> + RelSeq:?REL_SEQ_BITS>>} -> {UnackedCountDelta, SegEntries1} = case array:get(RelSeq, SegEntries) of {Pub, no_del, no_ack} -> diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index f9a3d9c7..710e6878 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -592,14 +592,14 @@ handle_method0(MethodName, FieldsBin, State = #v1{connection = #connection{protocol = Protocol}}) -> HandleException = fun(R) -> - case ?IS_RUNNING(State) of - true -> send_exception(State, 0, R); - %% We don't trust the client at this point - force - %% them to wait for a bit so they can't DOS us with - %% repeated failed logins etc. - false -> timer:sleep(?SILENT_CLOSE_DELAY * 1000), - throw({channel0_error, State#v1.connection_state, R}) - end + case ?IS_RUNNING(State) of + true -> send_exception(State, 0, R); + %% We don't trust the client at this point - force + %% them to wait for a bit so they can't DOS us with + %% repeated failed logins etc. + false -> timer:sleep(?SILENT_CLOSE_DELAY * 1000), + throw({channel0_error, State#v1.connection_state, R}) + end end, try handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin), @@ -734,8 +734,7 @@ auth_mechanisms(Sock) -> auth_mechanisms_binary(Sock) -> list_to_binary( - string:join( - [atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). + string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). auth_phase(Response, State = #v1{auth_mechanism = AuthMechanism, diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 53e707f4..f6a1c92f 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -59,7 +59,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, - immediate = Immediate}) -> + immediate = Immediate}) -> QPids = lookup_qpids(QNames), {Success, _} = delegate:invoke(QPids, @@ -67,7 +67,7 @@ deliver(QNames, Delivery = #delivery{mandatory = Mandatory, rabbit_amqqueue:deliver(Pid, Delivery) end), {Routed, Handled} = - lists:foldl(fun fold_deliveries/2, {false, []}, Success), + lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). 
@@ -91,7 +91,7 @@ match_routing_key(SrcName, [RoutingKey]) -> mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]); match_routing_key(SrcName, [_|_] = RoutingKeys) -> Condition = list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || - RKey <- RoutingKeys]]), + RKey <- RoutingKeys]]), MatchHead = #route{binding = #binding{source = SrcName, destination = '$1', key = '$2', diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 6a461a77..07f31a3a 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,8 +510,7 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { guid = Guid }, - #message_properties { - needs_confirming = NeedsConfirming }, + #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of true -> blind_confirm(self(), gb_sets:singleton(Guid)); @@ -632,12 +631,12 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { %% 3. If an ack is required, add something sensible to PA {AckTag, State1} = case AckRequired of - true -> StateN = record_pending_ack( - MsgStatus #msg_status { - is_delivered = true }, State), - {SeqId, StateN}; - false -> {undefined, State} - end, + true -> StateN = record_pending_ack( + MsgStatus #msg_status { + is_delivered = true }, State), + {SeqId, StateN}; + false -> {undefined, State} + end, PCount1 = PCount - one_if(IsPersistent andalso not AckRequired), Len1 = Len - 1, @@ -777,8 +776,8 @@ ram_duration(State = #vqstate { RamAckCount = gb_trees:size(RamAckIndex), Duration = %% msgs+acks / (msgs+acks/sec) == sec - case AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso - AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0 of + case (AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso + AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0) of true -> infinity; false -> (RamMsgCountPrev + RamMsgCount + RamAckCount + RamAckCountPrev) / @@ -1393,7 +1392,7 @@ accumulate_ack_init() -> {[], orddict:new()}. accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS msg_on_disk = false, index_on_disk = false }, - {PersistentSeqIdsAcc, GuidsByStore}) -> + {PersistentSeqIdsAcc, GuidsByStore}) -> {PersistentSeqIdsAcc, GuidsByStore}; accumulate_ack(SeqId, {IsPersistent, Guid, _MsgProps}, {PersistentSeqIdsAcc, GuidsByStore}) -> @@ -1817,12 +1816,12 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> multiple_routing_keys() -> transform_storage( - fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - (_) -> {error, corrupt_message} - end), + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end), ok. -- cgit v1.2.1 From 0e40c5131cf79c123b9eb85100bedebaa218df45 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 20:42:51 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 2 +- src/rabbit_queue_index.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 48fce9ed..4f5d2411 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -150,7 +150,7 @@ -spec(client_ref/1 :: (client_msstate()) -> client_ref()). -spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). 
-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) -> - {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). + {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). -spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). -spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). -spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 59d87654..8227e4cd 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -214,7 +214,7 @@ boolean(), boolean()}], qistate()}). -spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). -spec(bounds/1 :: (qistate()) -> - {non_neg_integer(), non_neg_integer(), qistate()}). + {non_neg_integer(), non_neg_integer(), qistate()}). -spec(recover/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}). -spec(add_queue_ttl/0 :: () -> 'ok'). -- cgit v1.2.1 From 40d08e7806c1980d428cd3065f71faa08e7239a9 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 00:31:49 +0000 Subject: make handling of confirms more obvious in BQ API and fix some bugs introduced earlier ...amazingly it all seems to work now --- include/rabbit_backing_queue_spec.hrl | 1 + src/rabbit_amqqueue_process.erl | 31 ++++++++++----------- src/rabbit_backing_queue.erl | 4 +++ src/rabbit_variable_queue.erl | 51 +++++++++++++++++++---------------- 4 files changed, 49 insertions(+), 38 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 2e4d1b0a..b2bf6bbb 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -43,6 +43,7 @@ (false, rabbit_types:basic_message(), rabbit_types:message_properties(), state()) -> {undefined, state()}). +-spec(drain_confirmed/1 :: (state()) -> {[rabbit_guid:guid()], state()}). -spec(dropwhile/2 :: (fun ((rabbit_types:message_properties()) -> boolean()), state()) -> state()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 069b803e..4d8b936a 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -215,13 +215,15 @@ noreply(NewState) -> {NewState1, Timeout} = next_state(NewState), {noreply, NewState1, Timeout}. -next_state(State) -> - State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = - ensure_rate_timer(State), - State2 = ensure_stats_timer(State1), - case BQ:needs_idle_timeout(BQS) of - true -> {ensure_sync_timer(State2), 0}; - false -> {stop_sync_timer(State2), hibernate} +next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> + {Guids, BQS1} = BQ:drain_confirmed(BQS), + BQNeedsSync = BQ:needs_idle_timeout(BQS1), + State1 = ensure_stats_timer( + ensure_rate_timer( + confirm_messages(Guids, State#q{backing_queue_state = BQS1}))), + case BQNeedsSync of + true -> {ensure_sync_timer(State1), 0}; + false -> {stop_sync_timer(State1), hibernate} end. ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> @@ -418,6 +420,8 @@ deliver_from_queue_deliver(AckRequired, false, State) -> fetch(AckRequired, State), {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. 
+confirm_messages([], State) -> + State; confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> {CMs, GTC1} = lists:foldl( @@ -523,9 +527,8 @@ deliver_or_enqueue(Delivery, State) -> requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> maybe_run_queue_via_backing_queue( - fun (BQS) -> - {[], BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS)} - end, State). + fun (BQS) -> BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) end, + State). fetch(AckRequired, State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> @@ -628,13 +631,11 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - maybe_run_queue_via_backing_queue( - fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). + maybe_run_queue_via_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, + State). maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - {Guids, BQS1} = Fun(BQS), - run_message_queue( - confirm_messages(Guids, State#q{backing_queue_state = BQS1})). + run_message_queue(State#q{backing_queue_state = Fun(BQS)}). commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, State = #q{backing_queue = BQ, diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index a8e201ea..b06f1e9c 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -54,6 +54,10 @@ behaviour_info(callbacks) -> %% (i.e. saves the round trip through the backing queue). {publish_delivered, 4}, + %% Return ids of messages which have been confirmed since + %% the last invocation of this function (or initialisation). + {drain_confirmed, 1}, + %% Drop messages from the head of the queue while the supplied %% predicate returns true. {dropwhile, 2}, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 67c4cc3c..eca3d8d3 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -17,8 +17,8 @@ -module(rabbit_variable_queue). -export([init/5, terminate/1, delete_and_terminate/1, - purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, - tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, + purge/1, publish/3, publish_delivered/4, drain_confirmed/1, + fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, @@ -255,6 +255,7 @@ msgs_on_disk, msg_indices_on_disk, unconfirmed, + confirmed, ack_out_counter, ack_in_counter, ack_rates @@ -353,6 +354,7 @@ msgs_on_disk :: gb_set(), msg_indices_on_disk :: gb_set(), unconfirmed :: gb_set(), + confirmed :: gb_set(), ack_out_counter :: non_neg_integer(), ack_in_counter :: non_neg_integer(), ack_rates :: rates() }). @@ -443,8 +445,8 @@ init(QueueName, true, true, AsyncCallback, SyncCallback, rabbit_msg_store:contains(Guid, PersistentClient) end, MsgIdxOnDiskFun), - init(true, IndexState, DeltaCount, Terms1, - PersistentClient, TransientClient, AsyncCallback, SyncCallback). + init(true, IndexState, DeltaCount, Terms1, AsyncCallback, SyncCallback, + PersistentClient, TransientClient). terminate(State) -> State1 = #vqstate { persistent_count = PCount, @@ -549,6 +551,9 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, persistent_count = PCount1, unconfirmed = UC1 }))}. 
+drain_confirmed(State = #vqstate { confirmed = C }) -> + {gb_sets:to_list(C), State #vqstate { confirmed = gb_sets:new() }}. + dropwhile(Pred, State) -> {_OkOrEmpty, State1} = dropwhile1(Pred, State), State1. @@ -981,7 +986,7 @@ msg_store_close_fds_fun(IsPersistent, Callback) -> fun (State = #vqstate { msg_store_clients = MSCState }) -> {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), - {[], State #vqstate { msg_store_clients = MSCState1 }} + State #vqstate { msg_store_clients = MSCState1 } end) end. @@ -1068,7 +1073,7 @@ update_rate(Now, Then, Count, {OThen, OCount}) -> %%---------------------------------------------------------------------------- init(IsDurable, IndexState, DeltaCount, Terms, - PersistentClient, TransientClient, AsyncCallback, SyncCallback) -> + AsyncCallback, SyncCallback, PersistentClient, TransientClient) -> {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), @@ -1111,6 +1116,7 @@ init(IsDurable, IndexState, DeltaCount, Terms, msgs_on_disk = gb_sets:new(), msg_indices_on_disk = gb_sets:new(), unconfirmed = gb_sets:new(), + confirmed = gb_sets:new(), ack_out_counter = 0, ack_in_counter = 0, ack_rates = blank_rate(Now, 0) }, @@ -1427,12 +1433,14 @@ confirm_commit_index(State = #vqstate { index_state = IndexState }) -> false -> State end. -remove_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, +record_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> + unconfirmed = UC, + confirmed = C }) -> State #vqstate { msgs_on_disk = gb_sets:difference(MOD, GuidSet), msg_indices_on_disk = gb_sets:difference(MIOD, GuidSet), - unconfirmed = gb_sets:difference(UC, GuidSet) }. + unconfirmed = gb_sets:difference(UC, GuidSet), + confirmed = gb_sets:union (C, GuidSet) }. needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, unconfirmed = UC }) -> @@ -1449,11 +1457,8 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, %% subtraction. not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). -msgs_confirmed(GuidSet, State) -> - {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. - blind_confirm(Callback, GuidSet) -> - Callback(fun (State) -> msgs_confirmed(GuidSet, State) end). + Callback(fun (State) -> record_confirms(GuidSet, State) end). msgs_written_to_disk(Callback, GuidSet, removed) -> blind_confirm(Callback, GuidSet); @@ -1461,22 +1466,22 @@ msgs_written_to_disk(Callback, GuidSet, written) -> Callback(fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), - State #vqstate { - msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) + record_confirms(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) end). msg_indices_written_to_disk(Callback, GuidSet) -> Callback(fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), - State #vqstate { - msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) + record_confirms(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) end). 
%%---------------------------------------------------------------------------- -- cgit v1.2.1 From 15ea3055ad204a3dc44a9f3c559cb9428bcfe8c3 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sat, 5 Mar 2011 02:28:19 +0000 Subject: nack messages when the first queue dies --- src/rabbit_channel.erl | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 526fb428..e2437b8e 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,12 +298,13 @@ handle_info({'DOWN', _MRef, process, QPid, Reason}, %% process_confirms to prevent each MsgSeqNo being removed from %% the set one by one which which would be inefficient State1 = State#ch{unconfirmed_qm = gb_trees:delete_any(QPid, UQM)}, - {MXs, State2} = process_confirms(MsgSeqNos, QPid, State1), + {Nack, SendFun} = case Reason of + normal -> {false, fun record_confirms/2}; + _ -> {true, fun send_nacks/2} + end, + {MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1), erase_queue_stats(QPid), - State3 = (case Reason of - normal -> fun record_confirms/2; - _ -> fun send_nacks/2 - end)(MXs, State2), + State3 = SendFun(MXs, State2), noreply(queue_blocked(QPid, State3)). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -513,23 +514,25 @@ record_confirms(MXs, State = #ch{confirmed = C}) -> confirm([], _QPid, State) -> State; confirm(MsgSeqNos, QPid, State) -> - {MXs, State1} = process_confirms(MsgSeqNos, QPid, State), + {MXs, State1} = process_confirms(MsgSeqNos, QPid, false, State), record_confirms(MXs, State1). -process_confirms(MsgSeqNos, QPid, State = #ch{unconfirmed_mq = UMQ, - unconfirmed_qm = UQM}) -> +process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ, + unconfirmed_qm = UQM}) -> {MXs, UMQ1, UQM1} = lists:foldl( - fun(MsgSeqNo, {_DMs, UMQ0, _UQM} = Acc) -> + fun(MsgSeqNo, {_MXs, UMQ0, _UQM} = Acc) -> case gb_trees:lookup(MsgSeqNo, UMQ0) of - {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, Acc, - State); - none -> Acc + {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, + Acc, Nack, State); + none -> + Acc end end, {[], UMQ, UQM}, MsgSeqNos), {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}. -remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, State) -> +remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack, + State) -> %% these confirms will be emitted even when a queue dies, but that %% should be fine, since the queue stats get erased immediately maybe_incr_stats([{{QPid, XName}, 1}], confirm, State), @@ -544,10 +547,12 @@ remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, State) -> UQM end, Qs1 = gb_sets:del_element(QPid, Qs), - case gb_sets:is_empty(Qs1) of - true -> + %% If QPid somehow died initiating a nack, clear the message from + %% internal data-structures. Also, cleanup empty entries. + Empty = gb_sets:is_empty(Qs1), + if (Empty orelse Nack) -> {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UMQ), UQM1}; - false -> + true -> {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ), UQM1} end. 
-- cgit v1.2.1 From 608ba6ef42d63b95c744d5d744ab1e4181f6ce45 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 08:53:23 +0000 Subject: cosmetic --- src/rabbit_amqqueue_process.erl | 101 +++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 57 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7719dfe7..24de9415 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -283,17 +283,16 @@ lookup_ch(ChPid) -> ch_record(ChPid) -> Key = {ch, ChPid}, case get(Key) of - undefined -> - MonitorRef = erlang:monitor(process, ChPid), - C = #cr{consumer_count = 0, - ch_pid = ChPid, - monitor_ref = MonitorRef, - acktags = sets:new(), - is_limit_active = false, - txn = none, - unsent_message_count = 0}, - put(Key, C), - C; + undefined -> MonitorRef = erlang:monitor(process, ChPid), + C = #cr{consumer_count = 0, + ch_pid = ChPid, + monitor_ref = MonitorRef, + acktags = sets:new(), + is_limit_active = false, + txn = none, + unsent_message_count = 0}, + put(Key, C), + C; C = #cr{} -> C end. @@ -319,18 +318,16 @@ erase_ch_record(#cr{ch_pid = ChPid, erase({ch, ChPid}), ok. -all_ch_record() -> - [C || {{ch, _}, C} <- get()]. +all_ch_record() -> [C || {{ch, _}, C} <- get()]. is_ch_blocked(#cr{unsent_message_count = Count, is_limit_active = Limited}) -> Limited orelse Count >= ?UNSENT_MESSAGE_LIMIT. ch_record_state_transition(OldCR, NewCR) -> - BlockedOld = is_ch_blocked(OldCR), - BlockedNew = is_ch_blocked(NewCR), - if BlockedOld andalso not(BlockedNew) -> unblock; - BlockedNew andalso not(BlockedOld) -> block; - true -> ok + case {is_ch_blocked(OldCR), is_ch_blocked(NewCR)} of + {true, false} -> unblock; + {false, true} -> block; + {_, _} -> ok end. deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, @@ -365,13 +362,12 @@ deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, case ch_record_state_transition(C, NewC) of ok -> {queue:in(QEntry, ActiveConsumersTail), BlockedConsumers}; - block -> - {ActiveConsumers1, BlockedConsumers1} = - move_consumers(ChPid, - ActiveConsumersTail, - BlockedConsumers), - {ActiveConsumers1, - queue:in(QEntry, BlockedConsumers1)} + block -> {ActiveConsumers1, BlockedConsumers1} = + move_consumers(ChPid, + ActiveConsumersTail, + BlockedConsumers), + {ActiveConsumers1, + queue:in(QEntry, BlockedConsumers1)} end, State2 = State1#q{ active_consumers = NewActiveConsumers, @@ -396,8 +392,7 @@ deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, {FunAcc, State} end. -deliver_from_queue_pred(IsEmpty, _State) -> - not IsEmpty. +deliver_from_queue_pred(IsEmpty, _State) -> not IsEmpty. deliver_from_queue_deliver(AckRequired, false, State) -> {{Message, IsDelivered, AckTag, Remaining}, State1} = @@ -405,17 +400,16 @@ deliver_from_queue_deliver(AckRequired, false, State) -> {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. 
confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> - {CMs, GTC1} = - lists:foldl( - fun(Guid, {CMs, GTC0}) -> - case dict:find(Guid, GTC0) of - {ok, {ChPid, MsgSeqNo}} -> - {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, GTC0)}; - _ -> - {CMs, GTC0} - end - end, {gb_trees:empty(), GTC}, Guids), + {CMs, GTC1} = lists:foldl( + fun(Guid, {CMs, GTC0}) -> + case dict:find(Guid, GTC0) of + {ok, {ChPid, MsgSeqNo}} -> + {gb_trees_cons(ChPid, MsgSeqNo, CMs), + dict:erase(Guid, GTC0)}; + _ -> + {CMs, GTC0} + end + end, {gb_trees:empty(), GTC}, Guids), gb_trees:map(fun(ChPid, MsgSeqNos) -> rabbit_channel:confirm(ChPid, MsgSeqNos) end, CMs), @@ -480,17 +474,14 @@ attempt_delivery(#delivery{txn = none, {Delivered, State1} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), {Delivered, NeedsConfirming, State1}; -attempt_delivery(#delivery{txn = Txn, +attempt_delivery(#delivery{txn = Txn, sender = ChPid, message = Message}, - {NeedsConfirming, - State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + {NeedsConfirming, State = #q{backing_queue = BQ, + backing_queue_state = BQS}}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), - {true, - NeedsConfirming, - State#q{backing_queue_state = - BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS)}}. + BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), + {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}. deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of @@ -661,9 +652,8 @@ drop_expired_messages(State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> Now = now_micros(), BQS1 = BQ:dropwhile( - fun (#message_properties{expiry = Expiry}) -> - Now > Expiry - end, BQS), + fun (#message_properties{expiry = Expiry}) -> Now > Expiry end, + BQS), ensure_ttl_timer(State#q{backing_queue_state = BQS1}). ensure_ttl_timer(State = #q{backing_queue = BQ, @@ -814,8 +804,7 @@ handle_call({info, Items}, _From, State) -> handle_call(consumers, _From, State) -> reply(consumers(State), State); -handle_call({deliver_immediately, Delivery}, - _From, State) -> +handle_call({deliver_immediately, Delivery}, _From, State) -> %% Synchronous, "immediate" delivery mode %% %% FIXME: Is this correct semantics? @@ -906,15 +895,13 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, case is_ch_blocked(C) of true -> State1#q{ blocked_consumers = - add_consumer( - ChPid, Consumer, - State1#q.blocked_consumers)}; + add_consumer(ChPid, Consumer, + State1#q.blocked_consumers)}; false -> run_message_queue( State1#q{ active_consumers = - add_consumer( - ChPid, Consumer, - State1#q.active_consumers)}) + add_consumer(ChPid, Consumer, + State1#q.active_consumers)}) end, emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, not NoAck), -- cgit v1.2.1 From b0e1d30b61c493e1a108842076376cbfea72040b Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 08:55:12 +0000 Subject: add missing assertion --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 07f31a3a..591e5a66 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -544,7 +544,7 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, dropwhile(Pred, State) -> {_OkOrEmpty, State1} = dropwhile1(Pred, State), - State1. + a(State1). 
dropwhile1(Pred, State) -> internal_queue_out( -- cgit v1.2.1 From d4fa5254102756b8af4f95822d04285766346f31 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 10:06:16 +0000 Subject: simplify various callback constructions --- src/rabbit_variable_queue.erl | 44 +++++++++++++++++++------------------------ 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 0b22d74e..08449013 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -706,11 +706,13 @@ tx_commit(Txn, Fun, MsgPropsFun, HasPersistentPubs = PersistentGuids =/= [], {AckTags1, a(case IsDurable andalso HasPersistentPubs of - true -> ok = msg_store_sync( - MSCState, true, PersistentGuids, - msg_store_callback(PersistentGuids, Pubs, AckTags1, - Fun, MsgPropsFun, - AsyncCallback, SyncCallback)), + true -> MsgStoreCallback = + fun () -> msg_store_callback( + PersistentGuids, Pubs, AckTags1, Fun, + MsgPropsFun, AsyncCallback, SyncCallback) + end, + ok = msg_store_sync(MSCState, true, PersistentGuids, + fun () -> spawn(MsgStoreCallback) end), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, Fun, MsgPropsFun, State) @@ -947,9 +949,9 @@ msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun, Callback). msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> + CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE), rabbit_msg_store:client_init( - MsgStore, Ref, MsgOnDiskFun, - msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE, Callback)). + MsgStore, Ref, MsgOnDiskFun, fun () -> Callback(CloseFDsFun) end). msg_store_write(MSCState, IsPersistent, Guid, Msg) -> with_immutable_msg_store_state( @@ -981,13 +983,10 @@ msg_store_close_fds(MSCState, IsPersistent) -> MSCState, IsPersistent, fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). -msg_store_close_fds_fun(IsPersistent, Callback) -> - fun () -> Callback( - fun (State = #vqstate { msg_store_clients = MSCState }) -> - {ok, MSCState1} = - msg_store_close_fds(MSCState, IsPersistent), - State #vqstate { msg_store_clients = MSCState1 } - end) +msg_store_close_fds_fun(IsPersistent) -> + fun (State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), + State #vqstate { msg_store_clients = MSCState1 } end. maybe_write_delivered(false, _SeqId, IndexState) -> @@ -1131,17 +1130,12 @@ blank_rate(Timestamp, IngressLength) -> msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun, AsyncCallback, SyncCallback) -> - fun () -> spawn(fun () -> case SyncCallback( - fun (StateN) -> - tx_commit_post_msg_store( - true, Pubs, AckTags, - Fun, MsgPropsFun, StateN) - end) of - ok -> ok; - error -> remove_persistent_messages( - PersistentGuids, AsyncCallback) - end - end) + case SyncCallback(fun (StateN) -> + tx_commit_post_msg_store(true, Pubs, AckTags, + Fun, MsgPropsFun, StateN) + end) of + ok -> ok; + error -> remove_persistent_messages(PersistentGuids, AsyncCallback) end. 
remove_persistent_messages(Guids, AsyncCallback) -> -- cgit v1.2.1 From 867bf496f0f3917bb3109b17464e7a3c5da20ae8 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 10:48:38 +0000 Subject: shorten maybe_run_queue_via_backing_queue to something less misleading though arguably still quite obscure Also move make it clear in the amqqueue API which exports are genuine and which are for internal use only. --- src/rabbit_amqqueue.erl | 25 +++++++++-------- src/rabbit_amqqueue_process.erl | 61 ++++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 8e4ca8e3..0adaaa7f 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -17,23 +17,24 @@ -module(rabbit_amqqueue). -export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). --export([internal_declare/2, internal_delete/1, - maybe_run_queue_via_backing_queue/2, - maybe_run_queue_via_backing_queue_async/2, - sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, - set_maximum_since_use/2, maybe_expire/1, drop_expired/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, stat/1, deliver/2, requeue/3, ack/4, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). --export([emit_stats/1]). -export([consumers/1, consumers_all/1]). -export([basic_get/3, basic_consume/7, basic_cancel/4]). -export([notify_sent/2, unblock/2, flush_all/2]). -export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). -export([on_node_down/1]). +%% internal +-export([internal_declare/2, internal_delete/1, + run_backing_queue/2, run_backing_queue_async/2, + sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, + set_maximum_since_use/2, maybe_expire/1, drop_expired/1, + emit_stats/1]). + -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). @@ -140,9 +141,9 @@ rabbit_types:connection_exit() | fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). --spec(maybe_run_queue_via_backing_queue/2 :: +-spec(run_backing_queue/2 :: (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). --spec(maybe_run_queue_via_backing_queue_async/2 :: +-spec(run_backing_queue_async/2 :: (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). @@ -438,11 +439,11 @@ internal_delete(QueueName) -> end end). -maybe_run_queue_via_backing_queue(QPid, Fun) -> - gen_server2:call(QPid, {maybe_run_queue_via_backing_queue, Fun}, infinity). +run_backing_queue(QPid, Fun) -> + gen_server2:call(QPid, {run_backing_queue, Fun}, infinity). -maybe_run_queue_via_backing_queue_async(QPid, Fun) -> - gen_server2:cast(QPid, {maybe_run_queue_via_backing_queue, Fun}). +run_backing_queue_async(QPid, Fun) -> + gen_server2:cast(QPid, {run_backing_queue, Fun}). sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). 
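run_backing_queue/2 and run_backing_queue_async/2 above differ only in using gen_server2:call versus gen_server2:cast for the same message. gen_server2 is RabbitMQ-internal, but the synchronous/asynchronous split they rely on can be sketched with the stock gen_server behaviour (module and names below are illustrative only):

    -module(run_bq_sketch).
    -behaviour(gen_server).
    -export([start_link/0, run_sync/2, run_async/2]).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).

    start_link() -> gen_server:start_link(?MODULE, [], []).

    %% blocks until the server process has applied Fun to its state
    run_sync(Pid, Fun)  -> gen_server:call(Pid, {run_backing_queue, Fun}, infinity).
    %% returns immediately; Fun is applied whenever the cast is processed
    run_async(Pid, Fun) -> gen_server:cast(Pid, {run_backing_queue, Fun}).

    init([]) -> {ok, 0}.

    handle_call({run_backing_queue, Fun}, _From, State) ->
        {reply, ok, Fun(State)}.

    handle_cast({run_backing_queue, Fun}, State) ->
        {noreply, Fun(State)}.

    handle_info(_Info, State) -> {noreply, State}.
    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.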
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 460a97ce..55ee2ee3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -163,14 +163,14 @@ bq_init(BQ, QName, IsDurable, Recover) -> Self = self(), BQ:init(QName, IsDurable, Recover, fun (Fun) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - Self, Fun) + rabbit_amqqueue:run_backing_queue_async(Self, Fun) end, fun (Fun) -> rabbit_misc:with_exit_handler( fun () -> error end, - fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( - Self, Fun) end) + fun () -> + rabbit_amqqueue:run_backing_queue(Self, Fun) + end) end). process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> @@ -517,7 +517,7 @@ deliver_or_enqueue(Delivery, State) -> end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> - maybe_run_queue_via_backing_queue( + run_backing_queue( fun (BQS) -> BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) end, State). @@ -622,10 +622,9 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - maybe_run_queue_via_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, - State). + run_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, State). -maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> +run_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> run_message_queue(State#q{backing_queue_state = Fun(BQS)}). commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, @@ -756,29 +755,29 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {maybe_run_queue_via_backing_queue, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Fun} -> 6; + _ -> 0 end. prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - delete_immediately -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; - {ack, _Txn, _MsgIds, _ChPid} -> 7; - {reject, _MsgIds, _Requeue, _ChPid} -> 7; - {notify_sent, _ChPid} -> 7; - {unblock, _ChPid} -> 7; - {maybe_run_queue_via_backing_queue, _Fun} -> 6; - sync_timeout -> 6; - _ -> 0 + update_ram_duration -> 8; + delete_immediately -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + {ack, _Txn, _MsgIds, _ChPid} -> 7; + {reject, _MsgIds, _Requeue, _ChPid} -> 7; + {notify_sent, _ChPid} -> 7; + {unblock, _ChPid} -> 7; + {run_backing_queue, _Fun} -> 6; + sync_timeout -> 6; + _ -> 0 end. prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, @@ -991,12 +990,12 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> - reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). +handle_call({run_backing_queue, Fun}, _From, State) -> + reply(ok, run_backing_queue(Fun, State)). 
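The synchronous callback that bq_init builds above wraps its rabbit_amqqueue:run_backing_queue call in rabbit_misc:with_exit_handler, so that calling into an already-dead queue process yields 'error' rather than crashing the caller. with_exit_handler's body is not part of these patches; the effect it needs can be sketched with a plain try/catch (names invented):

    %% Sketch only -- not the implementation of rabbit_misc:with_exit_handler,
    %% just the behaviour the sync callback depends on: fall back to 'error'
    %% instead of propagating the exit from a dead server.
    -module(safe_call_sketch).
    -export([safe_call/2]).

    safe_call(Pid, Request) ->
        try
            gen_server:call(Pid, Request, infinity)
        catch
            exit:_ -> error
        end.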
-handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> - noreply(maybe_run_queue_via_backing_queue(Fun, State)); +handle_cast({run_backing_queue, Fun}, State) -> + noreply(run_backing_queue(Fun, State)); handle_cast(sync_timeout, State) -> noreply(backing_queue_idle_timeout(State#q{sync_timer_ref = undefined})); -- cgit v1.2.1 From 163d45122ee547c2361193939ca41f624b70f366 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sat, 5 Mar 2011 11:42:43 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e2437b8e..8afa2d8d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -525,8 +525,7 @@ process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ, case gb_trees:lookup(MsgSeqNo, UMQ0) of {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, Acc, Nack, State); - none -> - Acc + none -> Acc end end, {[], UMQ, UQM}, MsgSeqNos), {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}. -- cgit v1.2.1 From 282e6115095e9ed2a60c2b9f5858ff2db17d7d3a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sun, 6 Mar 2011 12:45:40 +0000 Subject: add test for confirms in case of queue death There's a race in the test, but it seems to work reliably. I ran it 1000 times in isolation and it didn't fail. --- src/rabbit_tests.erl | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 88b58166..4ad35696 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -57,6 +57,7 @@ all_tests() -> passed = test_cluster_management(), passed = test_user_management(), passed = test_server_status(), + passed = test_confirms(), passed = maybe_run_cluster_dependent_tests(), passed = test_configurable_server_properties(), passed. @@ -1225,6 +1226,79 @@ test_statistics_receive_event1(Ch, Matcher) -> after 1000 -> throw(failed_to_receive_event) end. +test_confirms_receiver(Pid) -> + receive + shutdown -> + ok; + {send_command, Method} -> + Pid ! Method, + test_confirms_receiver(Pid) + end. + +test_confirms() -> + {_Writer, Ch} = test_spawn(fun test_confirms_receiver/1), + DeclareBindDurableQueue = + fun() -> + rabbit_channel:do(Ch, #'queue.declare'{durable = true}), + receive #'queue.declare_ok'{queue = Q0} -> + rabbit_channel:do(Ch, #'queue.bind'{ + queue = Q0, + exchange = <<"amq.direct">>, + routing_key = "magic" }), + receive #'queue.bind_ok'{} -> + Q0 + after 1000 -> + throw(failed_to_bind_queue) + end + after 1000 -> + throw(failed_to_declare_queue) + end + end, + %% Declare and bind two queues + QName1 = DeclareBindDurableQueue(), + QName2 = DeclareBindDurableQueue(), + %% Get the first one's pid (we'll crash it later) + {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)), + QPid1 = Q1#amqqueue.pid, + %% Enable confirms + rabbit_channel:do(Ch, #'confirm.select'{}), + receive #'confirm.select_ok'{} -> + ok + after 1000 -> + throw(failed_to_enable_confirms) + end, + %% Publish a message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, + routing_key = "magic" + }, + rabbit_basic:build_content( + #'P_basic'{delivery_mode = 2}, <<"">>)), + %% Crash the queue + QPid1 ! 
boom, + %% Wait for a nack + receive + #'basic.nack'{} -> + ok; + #'basic.ack'{} -> + throw(received_ack_instead_of_nack) + after 2000 -> + throw(did_not_receive_nack) + end, + receive + #'basic.ack'{} -> + throw(received_ack_when_none_expected) + after 1000 -> + ok + end, + %% Delete queue + rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), + receive #'queue.delete_ok'{} -> + ok + after 1000 -> + throw(failed_to_cleanup_queue) + end, + passed. + test_statistics() -> application:set_env(rabbit, collect_statistics, fine), -- cgit v1.2.1 From cee0c032110bfd427d854a0e78da1087c5f4bf28 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 11:00:17 +0000 Subject: Tweaked delete accumulator key --- src/rabbit_exchange_type_topic.erl | 60 ++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 98b223ff..7ff0808e 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -67,56 +67,56 @@ add_binding(true, _Exchange, Binding) -> add_binding(false, _Exchange, _Binding) -> ok. -remove_bindings(true, _X, Bs) -> +remove_bindings(true, X, Bs) -> {ToDelete, Paths} = lists:foldl( - fun(B = #binding{source = X, destination = D}, {Acc, PathAcc}) -> + fun(B = #binding{destination = D}, {Acc, PathAcc}) -> Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(X, Path, maybe_add_path( - X, Path, PathAcc)), - {[{X, FinalNode, D} | Acc], PathAcc1} + PathAcc1 = decrement_bindings(Path, + maybe_add_path(Path, PathAcc)), + {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), - [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], + [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {{X, [{Node, W}, {Parent, _} | _ ]}, {0, 0}} + {[{Node, W}, {Parent, _} | _ ], {0, 0}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> ok. -maybe_add_path(_X, [{root, none}], PathAcc) -> +maybe_add_path([{root, none}], PathAcc) -> PathAcc; -maybe_add_path(X, Path, PathAcc) -> - case gb_trees:is_defined({X, Path}, PathAcc) of +maybe_add_path(Path, PathAcc) -> + case gb_trees:is_defined(Path, PathAcc) of true -> PathAcc; - false -> gb_trees:insert({X, Path}, counts(X, Path), PathAcc) + false -> gb_trees:insert(Path, counts(Path), PathAcc) end. -decrement_bindings(X, Path, PathAcc) -> +decrement_bindings(Path, PathAcc) -> with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, - X, Path, PathAcc). + Path, PathAcc). -decrement_edges(X, Path, PathAcc) -> +decrement_edges(Path, PathAcc) -> with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, - X, Path, PathAcc). + Path, PathAcc). -with_path_acc(_Fun, _X, [{root, none}], PathAcc) -> +with_path_acc(_Fun, [{root, none}], PathAcc) -> PathAcc; -with_path_acc(Fun, X, Path, PathAcc) -> - NewVal = Fun(gb_trees:get({X, Path}, PathAcc)), - NewPathAcc = gb_trees:update({X, Path}, NewVal, PathAcc), +with_path_acc(Fun, Path, PathAcc) -> + NewVal = Fun(gb_trees:get(Path, PathAcc)), + NewPathAcc = gb_trees:update(Path, NewVal, PathAcc), case NewVal of {0, 0} -> [_ | ParentPath] = Path, - decrement_edges(X, ParentPath, - maybe_add_path(X, ParentPath, NewPathAcc)); + decrement_edges(ParentPath, + maybe_add_path(ParentPath, NewPathAcc)); _ -> NewPathAcc end. -counts(X, [{FinalNode, _} | _]) -> - {trie_binding_count(X, FinalNode), trie_child_count(X, FinalNode)}. 
+counts([{FinalNode, _} | _]) -> + {trie_binding_count(FinalNode), trie_child_count(FinalNode)}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -232,19 +232,17 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_child_count(X, Node) -> +trie_child_count(Node) -> count(rabbit_topic_trie_edge, - #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - node_id = Node, - _ = '_'}, + #topic_trie_edge{trie_edge = #trie_edge{node_id = Node, + _ = '_'}, _ = '_'}). -trie_binding_count(X, Node) -> +trie_binding_count(Node) -> count(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - _ = '_'}, + trie_binding = #trie_binding{node_id = Node, + _ = '_'}, _ = '_'}). count(Table, Match) -> -- cgit v1.2.1 From 8cc78e11a85a15770725aa6808d84397abb79521 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 7 Mar 2011 14:32:43 +0000 Subject: document bq init params --- src/rabbit_backing_queue.erl | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index b06f1e9c..dfee2ee3 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -33,6 +33,18 @@ behaviour_info(callbacks) -> {stop, 0}, %% Initialise the backing queue and its state. + %% + %% Takes + %% 1. the queue name + %% 2. a boolean indicating whether the queue is durable + %% 3. a boolean indicating whether the queue is an existing queue + %% that should be recovered + %% 4. an asynchronous callback which can be invoked by the + %% backing queue when an event has occured that requires a + %% state transition. The callback accepts a function from + %% state to state. + %% 5. a synchronous callback. Same as the asynchronous callback + %% but waits for completion and returns 'error' on error. {init, 5}, %% Called on queue shutdown when queue isn't being deleted. -- cgit v1.2.1 From e20fb0e89b7f0c2875921318395c2fffc4dfd4ac Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 15:01:13 +0000 Subject: Added exchange name back into count functions. Made path node id key in delete accumulator --- src/rabbit_exchange_type_topic.erl | 58 ++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 7ff0808e..f9ac69ba 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -72,8 +72,9 @@ remove_bindings(true, X, Bs) -> lists:foldl( fun(B = #binding{destination = D}, {Acc, PathAcc}) -> Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(Path, - maybe_add_path(Path, PathAcc)), + PathAcc1 = decrement_bindings(X, Path, + maybe_add_path(X, Path, + PathAcc)), {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), @@ -85,38 +86,43 @@ remove_bindings(true, X, Bs) -> remove_bindings(false, _X, _Bs) -> ok. -maybe_add_path([{root, none}], PathAcc) -> +maybe_add_path(_X, [{root, none}], PathAcc) -> PathAcc; -maybe_add_path(Path, PathAcc) -> - case gb_trees:is_defined(Path, PathAcc) of +maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> + case gb_trees:is_defined(Node, PathAcc) of true -> PathAcc; - false -> gb_trees:insert(Path, counts(Path), PathAcc) + false -> gb_trees:insert(Node, path_entry(X, Path), PathAcc) end. 
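The comments added to rabbit_backing_queue.erl above spell out init/5's parameters: the queue name, the durability and recovery flags, and the asynchronous and synchronous state-transition callbacks, each of which accepts a function from state to state. A hedged skeleton of a trivial backing queue honouring that contract might look like the following (record and function names are invented; this is not rabbit_variable_queue):

    %% Sketch only: store the documented init/5 arguments, and show how an
    %% out-of-band event would hand a state-transforming fun back to the
    %% owning queue process via the asynchronous callback.
    -module(bq_sketch).
    -export([init/5, on_external_event/1]).

    -record(bqs, {qname, durable, async_callback, sync_callback}).

    init(QName, IsDurable, _Recover, AsyncCallback, SyncCallback) ->
        #bqs{qname          = QName,
             durable        = IsDurable,
             async_callback = AsyncCallback,
             sync_callback  = SyncCallback}.

    on_external_event(#bqs{async_callback = Async}) ->
        %% ask the queue process to apply a (here trivial) transition to our state
        Async(fun (BQS) -> BQS end).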
-decrement_bindings(Path, PathAcc) -> - with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, +decrement_bindings(X, Path, PathAcc) -> + with_path_acc(X, + fun({_Path, Bindings, Edges}) -> + {Path, Bindings - 1, Edges} + end, Path, PathAcc). -decrement_edges(Path, PathAcc) -> - with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, +decrement_edges(X, Path, PathAcc) -> + with_path_acc(X, + fun({_Path, Bindings, Edges}) -> + {Path, Bindings, Edges - 1} + end, Path, PathAcc). -with_path_acc(_Fun, [{root, none}], PathAcc) -> +with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> PathAcc; -with_path_acc(Fun, Path, PathAcc) -> - NewVal = Fun(gb_trees:get(Path, PathAcc)), - NewPathAcc = gb_trees:update(Path, NewVal, PathAcc), +with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> + NewVal = Fun(gb_trees:get(Node, PathAcc)), + NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), case NewVal of {0, 0} -> - [_ | ParentPath] = Path, - decrement_edges(ParentPath, - maybe_add_path(ParentPath, NewPathAcc)); + decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); _ -> NewPathAcc end. -counts([{FinalNode, _} | _]) -> - {trie_binding_count(FinalNode), trie_child_count(FinalNode)}. +path_entry(X, Path = [{Node, _} | _]) -> + {Path, trie_binding_count(X, Node), trie_child_count(X, Node)}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -232,17 +238,19 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_child_count(Node) -> +trie_child_count(X, Node) -> count(rabbit_topic_trie_edge, - #topic_trie_edge{trie_edge = #trie_edge{node_id = Node, - _ = '_'}, + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + node_id = Node, + _ = '_'}, _ = '_'}). -trie_binding_count(Node) -> +trie_binding_count(X, Node) -> count(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{node_id = Node, - _ = '_'}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + _ = '_'}, _ = '_'}). count(Table, Match) -> -- cgit v1.2.1 From 4edad8a5317c97417b0340ce617b5150a069587a Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 15:12:17 +0000 Subject: Comment describing the why behind the refactoring of remove_bindings --- src/rabbit_exchange_type_topic.erl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index f9ac69ba..5c5d760e 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -68,6 +68,11 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, X, Bs) -> + %% The remove process is split into two distinct phases. In the + %% first phase, we first gather the lists of bindings and edges to + %% delete, then in the second phase we process all the + %% deletions. This is to prevent interleaving of read/write + %% operations in mnesia that can adversely affect performance. 
{ToDelete, Paths} = lists:foldl( fun(B = #binding{destination = D}, {Acc, PathAcc}) -> -- cgit v1.2.1 From 2219c3d554b1615a967acf62a288ac5813f2cb67 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 7 Mar 2011 15:27:54 +0000 Subject: guids be gone --- src/rabbit_amqqueue_process.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 21541541..b32fa0ff 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -401,11 +401,11 @@ deliver_from_queue_deliver(AckRequired, false, State) -> confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> {CMs, MTC1} = lists:foldl( - fun(Guid, {CMs, MTC0}) -> - case dict:find(Guid, MTC0) of + fun(MsgId, {CMs, MTC0}) -> + case dict:find(MsgId, MTC0) of {ok, {ChPid, MsgSeqNo}} -> {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, MTC0)}; + dict:erase(MsgId, MTC0)}; _ -> {CMs, MTC0} end -- cgit v1.2.1 From 765b4d8124fa44b8116211619d3cab9ecde2e106 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 15:53:17 +0000 Subject: Remove EnvVarUpdate --- packaging/windows-exe/lib/EnvVarUpdate.nsh | 327 ----------------------------- packaging/windows-exe/rabbitmq_nsi.in | 7 - 2 files changed, 334 deletions(-) delete mode 100644 packaging/windows-exe/lib/EnvVarUpdate.nsh diff --git a/packaging/windows-exe/lib/EnvVarUpdate.nsh b/packaging/windows-exe/lib/EnvVarUpdate.nsh deleted file mode 100644 index 839d6a02..00000000 --- a/packaging/windows-exe/lib/EnvVarUpdate.nsh +++ /dev/null @@ -1,327 +0,0 @@ -/** - * EnvVarUpdate.nsh - * : Environmental Variables: append, prepend, and remove entries - * - * WARNING: If you use StrFunc.nsh header then include it before this file - * with all required definitions. 
This is to avoid conflicts - * - * Usage: - * ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString" - * - * Credits: - * Version 1.0 - * * Cal Turney (turnec2) - * * Amir Szekely (KiCHiK) and e-circ for developing the forerunners of this - * function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar, - * WriteEnvStr, and un.DeleteEnvStr - * * Diego Pedroso (deguix) for StrTok - * * Kevin English (kenglish_hi) for StrContains - * * Hendri Adriaens (Smile2Me), Diego Pedroso (deguix), and Dan Fuhry - * (dandaman32) for StrReplace - * - * Version 1.1 (compatibility with StrFunc.nsh) - * * techtonik - * - * http://nsis.sourceforge.net/Environmental_Variables:_append%2C_prepend%2C_and_remove_entries - * - */ - - -!ifndef ENVVARUPDATE_FUNCTION -!define ENVVARUPDATE_FUNCTION -!verbose push -!verbose 3 -!include "LogicLib.nsh" -!include "WinMessages.NSH" -!include "StrFunc.nsh" - -; ---- Fix for conflict if StrFunc.nsh is already includes in main file ----------------------- -!macro _IncludeStrFunction StrFuncName - !ifndef ${StrFuncName}_INCLUDED - ${${StrFuncName}} - !endif - !ifndef Un${StrFuncName}_INCLUDED - ${Un${StrFuncName}} - !endif - !define un.${StrFuncName} "${Un${StrFuncName}}" -!macroend - -!insertmacro _IncludeStrFunction StrTok -!insertmacro _IncludeStrFunction StrStr -!insertmacro _IncludeStrFunction StrRep - -; ---------------------------------- Macro Definitions ---------------------------------------- -!macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString - Push "${EnvVarName}" - Push "${Action}" - Push "${RegLoc}" - Push "${PathString}" - Call EnvVarUpdate - Pop "${ResultVar}" -!macroend -!define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"' - -!macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString - Push "${EnvVarName}" - Push "${Action}" - Push "${RegLoc}" - Push "${PathString}" - Call un.EnvVarUpdate - Pop "${ResultVar}" -!macroend -!define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"' -; ---------------------------------- Macro Definitions end------------------------------------- - -;----------------------------------- EnvVarUpdate start---------------------------------------- -!define hklm_all_users 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' -!define hkcu_current_user 'HKCU "Environment"' - -!macro EnvVarUpdate UN - -Function ${UN}EnvVarUpdate - - Push $0 - Exch 4 - Exch $1 - Exch 3 - Exch $2 - Exch 2 - Exch $3 - Exch - Exch $4 - Push $5 - Push $6 - Push $7 - Push $8 - Push $9 - Push $R0 - - /* After this point: - ------------------------- - $0 = ResultVar (returned) - $1 = EnvVarName (input) - $2 = Action (input) - $3 = RegLoc (input) - $4 = PathString (input) - $5 = Orig EnvVar (read from registry) - $6 = Len of $0 (temp) - $7 = tempstr1 (temp) - $8 = Entry counter (temp) - $9 = tempstr2 (temp) - $R0 = tempChar (temp) */ - - ; Step 1: Read contents of EnvVarName from RegLoc - ; - ; Check for empty EnvVarName - ${If} $1 == "" - SetErrors - DetailPrint "ERROR: EnvVarName is blank" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Check for valid Action - ${If} $2 != "A" - ${AndIf} $2 != "P" - ${AndIf} $2 != "R" - SetErrors - DetailPrint "ERROR: Invalid Action - must be A, P, or R" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ${If} $3 == HKLM - ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5 - ${ElseIf} $3 == HKCU - ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5 - ${Else} - 
SetErrors - DetailPrint 'ERROR: Action is [$3] but must be "HKLM" or HKCU"' - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Check for empty PathString - ${If} $4 == "" - SetErrors - DetailPrint "ERROR: PathString is blank" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Make sure we've got some work to do - ${If} $5 == "" - ${AndIf} $2 == "R" - SetErrors - DetailPrint "$1 is empty - Nothing to remove" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Step 2: Scrub EnvVar - ; - StrCpy $0 $5 ; Copy the contents to $0 - ; Remove spaces around semicolons (NOTE: spaces before the 1st entry or - ; after the last one are not removed here but instead in Step 3) - ${If} $0 != "" ; If EnvVar is not empty ... - ${Do} - ${${UN}StrStr} $7 $0 " ;" - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 " ;" ";" ; Remove ';' - ${Loop} - ${Do} - ${${UN}StrStr} $7 $0 "; " - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 "; " ";" ; Remove ';' - ${Loop} - ${Do} - ${${UN}StrStr} $7 $0 ";;" - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 ";;" ";" - ${Loop} - - ; Remove a leading or trailing semicolon from EnvVar - StrCpy $7 $0 1 0 - ${If} $7 == ";" - StrCpy $0 $0 "" 1 ; Change ';' to '' - ${EndIf} - StrLen $6 $0 - IntOp $6 $6 - 1 - StrCpy $7 $0 1 $6 - ${If} $7 == ";" - StrCpy $0 $0 $6 ; Change ';' to '' - ${EndIf} - ; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug - ${EndIf} - - /* Step 3. Remove all instances of the target path/string (even if "A" or "P") - $6 = bool flag (1 = found and removed PathString) - $7 = a string (e.g. path) delimited by semicolon(s) - $8 = entry counter starting at 0 - $9 = copy of $0 - $R0 = tempChar */ - - ${If} $5 != "" ; If EnvVar is not empty ... - StrCpy $9 $0 - StrCpy $0 "" - StrCpy $8 0 - StrCpy $6 0 - - ${Do} - ${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter - - ${If} $7 == "" ; If we've run out of entries, - ${ExitDo} ; were done - ${EndIf} ; - - ; Remove leading and trailing spaces from this entry (critical step for Action=Remove) - ${Do} - StrCpy $R0 $7 1 - ${If} $R0 != " " - ${ExitDo} - ${EndIf} - StrCpy $7 $7 "" 1 ; Remove leading space - ${Loop} - ${Do} - StrCpy $R0 $7 1 -1 - ${If} $R0 != " " - ${ExitDo} - ${EndIf} - StrCpy $7 $7 -1 ; Remove trailing space - ${Loop} - ${If} $7 == $4 ; If string matches, remove it by not appending it - StrCpy $6 1 ; Set 'found' flag - ${ElseIf} $7 != $4 ; If string does NOT match - ${AndIf} $0 == "" ; and the 1st string being added to $0, - StrCpy $0 $7 ; copy it to $0 without a prepended semicolon - ${ElseIf} $7 != $4 ; If string does NOT match - ${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0, - StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon - ${EndIf} ; - - IntOp $8 $8 + 1 ; Bump counter - ${Loop} ; Check for duplicates until we run out of paths - ${EndIf} - - ; Step 4: Perform the requested Action - ; - ${If} $2 != "R" ; If Append or Prepend - ${If} $6 == 1 ; And if we found the target - DetailPrint "Target is already present in $1. 
It will be removed and" - ${EndIf} - ${If} $0 == "" ; If EnvVar is (now) empty - StrCpy $0 $4 ; just copy PathString to EnvVar - ${If} $6 == 0 ; If found flag is either 0 - ${OrIf} $6 == "" ; or blank (if EnvVarName is empty) - DetailPrint "$1 was empty and has been updated with the target" - ${EndIf} - ${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty), - StrCpy $0 $0;$4 ; append PathString - ${If} $6 == 1 - DetailPrint "appended to $1" - ${Else} - DetailPrint "Target was appended to $1" - ${EndIf} - ${Else} ; If Prepend (and EnvVar is not empty), - StrCpy $0 $4;$0 ; prepend PathString - ${If} $6 == 1 - DetailPrint "prepended to $1" - ${Else} - DetailPrint "Target was prepended to $1" - ${EndIf} - ${EndIf} - ${Else} ; If Action = Remove - ${If} $6 == 1 ; and we found the target - DetailPrint "Target was found and removed from $1" - ${Else} - DetailPrint "Target was NOT found in $1 (nothing to remove)" - ${EndIf} - ${If} $0 == "" - DetailPrint "$1 is now empty" - ${EndIf} - ${EndIf} - - ; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change - ; - ClearErrors - ${If} $3 == HKLM - WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section - ${ElseIf} $3 == HKCU - WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section - ${EndIf} - - IfErrors 0 +4 - MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3" - DetailPrint "Could not write updated $1 to $3" - Goto EnvVarUpdate_Restore_Vars - - ; "Export" our change - SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 - - EnvVarUpdate_Restore_Vars: - ; - ; Restore the user's variables and return ResultVar - Pop $R0 - Pop $9 - Pop $8 - Pop $7 - Pop $6 - Pop $5 - Pop $4 - Pop $3 - Pop $2 - Pop $1 - Push $0 ; Push my $0 (ResultVar) - Exch - Pop $0 ; Restore his $0 - -FunctionEnd - -!macroend ; EnvVarUpdate UN -!insertmacro EnvVarUpdate "" -!insertmacro EnvVarUpdate "un." 
-;----------------------------------- EnvVarUpdate end---------------------------------------- - -!verbose pop -!endif diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index 6d79ffd4..3da8f4d2 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -4,7 +4,6 @@ !include WinMessages.nsh !include FileFunc.nsh !include WordFunc.nsh -!include lib\EnvVarUpdate.nsh !define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' !define uninstall "Software\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ" @@ -77,9 +76,6 @@ Section "RabbitMQ Server (required)" Rabbit File /r "rabbitmq_server-%%VERSION%%" File "rabbitmq.ico" - ; Add to PATH - ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" - ; Write the installation path into the registry WriteRegStr HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server" "Install_Dir" "$INSTDIR" @@ -157,9 +153,6 @@ Section "Uninstall" ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" stop' ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" remove' - ; Remove from PATH - ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" - ; Remove files and uninstaller RMDir /r "$INSTDIR\rabbitmq_server-%%VERSION%%" Delete "$INSTDIR\rabbitmq.ico" -- cgit v1.2.1 From d0c3b429118272bade0b25ef690402af474f9fc8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 16:25:36 +0000 Subject: Add a command prompt shortcut that starts up in the right dir. That's pretty lame, but at least it's safe. --- packaging/windows-exe/rabbitmq_nsi.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index 3da8f4d2..1ed4064e 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -122,6 +122,9 @@ Section "Start Menu" RabbitStartMenu CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Start Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "start" "$INSTDIR\rabbitmq.ico" CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Stop Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "stop" "$INSTDIR\rabbitmq.ico" + SetOutPath "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Command Prompt (sbin dir).lnk" "$WINDIR\system32\cmd.exe" "" "$WINDIR\system32\cmd.exe" + SetOutPath $INSTDIR SectionEnd ;-------------------------------- -- cgit v1.2.1 From 0d876f0dd1a5e4db1d8a441f53dac2f7aa4e6578 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 16:57:21 +0000 Subject: Fixed a few pattern matching errors - remove actually works again --- src/rabbit_exchange_type_topic.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 5c5d760e..ff4828c1 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -83,9 +83,10 @@ remove_bindings(true, X, Bs) -> {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), + io:format("~p~n", [Paths]), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {[{Node, W}, {Parent, _} | _ ], {0, 0}} + {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> @@ -119,7 +120,7 @@ 
with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewVal = Fun(gb_trees:get(Node, PathAcc)), NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), case NewVal of - {0, 0} -> + {_, 0, 0} -> decrement_edges(X, ParentPath, maybe_add_path(X, ParentPath, NewPathAcc)); _ -> -- cgit v1.2.1 From 13bbf692083e6ab07f771b797333f695dc18db32 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:00:11 +0000 Subject: Explain the tuple here. --- src/rabbit_upgrade.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f1f0d6d3..dd253468 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -242,6 +242,8 @@ read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; + %% Write in this format for future expansion; + %% we want to allow plugins to own upgrades. true -> [{rabbit, RV}] = V, {ok, RV} end; -- cgit v1.2.1 From 4d806324368f812c4831724f868079f3a2835892 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:06:53 +0000 Subject: Variety of small QA-related tweaks. --- src/rabbit.erl | 2 +- src/rabbit_mnesia.erl | 7 ++++--- src/rabbit_upgrade.erl | 20 ++++++++++---------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 21c1452f..e3288eaf 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -204,7 +204,7 @@ start() -> end. stop() -> - rabbit_mnesia:record_running_disc_nodes(), + ok = rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 33e8764c..30083cc0 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -387,7 +387,8 @@ record_running_disc_nodes() -> sets:from_list(running_clustered_nodes()))) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. - rabbit_misc:write_term_file(FileName, [Nodes]). + rabbit_misc:write_term_file(FileName, [Nodes]), + ok. read_previous_run_disc_nodes() -> FileName = running_nodes_filename(), @@ -433,7 +434,7 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[], true} -> %% We're the first node up - case rabbit_upgrade:maybe_upgrade(local) of + case rabbit_upgrade:maybe_upgrade_local() of ok -> ensure_schema_integrity(); version_not_available -> schema_ok_or_move() end; @@ -449,7 +450,7 @@ init_db(ClusterNodes, Force) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade(local) of + case rabbit_upgrade:maybe_upgrade_local() of ok -> ok; %% If we're just starting up a new node we won't have diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dd253468..e466eb87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,7 +16,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). -export([read_version/0, write_version/0, desired_version/0, desired_version/1]). @@ -35,7 +35,7 @@ -type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). --spec(maybe_upgrade/1 :: (scope()) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). 
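read_version/0 and write_version/0, whose specs appear just above, persist the schema version as a single Erlang term via rabbit_misc's term-file helpers, which are not shown in these patches. A rough stdlib-only sketch of that single-term round trip:

    %% Sketch with standard library calls only; rabbit_misc:read_term_file
    %% and write_term_file themselves are not part of this patch series.
    -module(term_file_sketch).
    -export([read_version/1, write_version/2]).

    read_version(File) ->
        case file:consult(File) of
            {ok, [V]}        -> {ok, V};   %% exactly one term expected
            {ok, _}          -> {error, unexpected_contents};
            {error, _} = Err -> Err
        end.

    write_version(File, Version) ->
        file:write_file(File, io_lib:format("~p.~n", [Version])).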
@@ -128,7 +128,7 @@ upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(), AfterUs} of + case {is_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -169,7 +169,7 @@ upgrade_mode(AllNodes) -> end end. -am_i_disc_node() -> +is_disc_node() -> %% This is pretty ugly but we can't start Mnesia and ask it (will hang), %% we can't look at the config file (may not include us even if we're a %% disc node). @@ -210,13 +210,13 @@ secondary_upgrade(AllNodes) -> %% Note that we cluster with all nodes, rather than all disc nodes %% (as we can't know all disc nodes at this point). This is safe as %% we're not writing the cluster config, just setting up Mnesia. - ClusterNodes = case am_i_disc_node() of + ClusterNodes = case is_disc_node() of true -> AllNodes; false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - rabbit_mnesia:init_db(ClusterNodes, true), - write_version(mnesia), + ok = rabbit_mnesia:init_db(ClusterNodes, true), + ok = write_version(mnesia), ok. nodes_running(Nodes) -> @@ -230,11 +230,11 @@ node_running(Node) -> %% ------------------------------------------------------------------- -maybe_upgrade(Scope) -> - case upgrades_required(Scope) of +maybe_upgrade_local() -> + case upgrades_required(local) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Scope, Upgrades, + Upgrades -> apply_upgrades(local, Upgrades, fun() -> ok end) end. -- cgit v1.2.1 From 165c1d3f25a44c91650556a68ba725239f1f8d12 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:21:36 +0000 Subject: Spec, rename functions. --- src/rabbit_mnesia.erl | 13 +++++++------ src/rabbit_upgrade.erl | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 30083cc0..eb92e9fe 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -22,8 +22,8 @@ is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, - record_running_disc_nodes/0, read_previous_run_disc_nodes/0, - delete_previous_run_disc_nodes/0, running_nodes_filename/0]). + record_running_disc_nodes/0, read_previously_running_disc_nodes/0, + delete_previously_running_disc_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -45,6 +45,7 @@ -spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). +-spec(init_db/2 :: ([node()], boolean()) -> 'ok'). -spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). @@ -61,8 +62,8 @@ -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). -spec(record_running_disc_nodes/0 :: () -> 'ok'). --spec(read_previous_run_disc_nodes/0 :: () -> [node()]). --spec(delete_previous_run_disc_nodes/0 :: () -> 'ok'). +-spec(read_previously_running_disc_nodes/0 :: () -> [node()]). +-spec(delete_previously_running_disc_nodes/0 :: () -> 'ok'). -spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -390,7 +391,7 @@ record_running_disc_nodes() -> rabbit_misc:write_term_file(FileName, [Nodes]), ok. 
-read_previous_run_disc_nodes() -> +read_previously_running_disc_nodes() -> FileName = running_nodes_filename(), case rabbit_misc:read_term_file(FileName) of {ok, [Nodes]} -> Nodes; @@ -399,7 +400,7 @@ read_previous_run_disc_nodes() -> FileName, Reason}}) end. -delete_previous_run_disc_nodes() -> +delete_previously_running_disc_nodes() -> FileName = running_nodes_filename(), case file:delete(FileName) of ok -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index e466eb87..0a821878 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -122,12 +122,12 @@ maybe_upgrade_mnesia() -> secondary -> secondary_upgrade(AllNodes) end end, - ok = rabbit_mnesia:delete_previous_run_disc_nodes(). + ok = rabbit_mnesia:delete_previously_running_disc_nodes(). upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> - AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), + AfterUs = rabbit_mnesia:read_previously_running_disc_nodes(), case {is_disc_node(), AfterUs} of {true, []} -> primary; -- cgit v1.2.1 From f6d550f49e9e90a551ecd20e80d405068db7d781 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:23:56 +0000 Subject: Simpler is_new_version/1. --- src/rabbit_upgrade.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0a821878..f59dbdfe 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -401,7 +401,8 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). info(Msg, Args) -> error_logger:info_msg(Msg, Args). is_new_version(Version) -> - is_list(Version) andalso - length(Version) > 0 andalso - lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, - Version). + try + orddict:size(Version) > 0 + catch error:badarg -> + false + end. -- cgit v1.2.1 From 32bcf5f5f54f8a57dc8b9b27966aebc679dee7d6 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 10:05:11 +0000 Subject: Removed io:format --- src/rabbit_exchange_type_topic.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ff4828c1..7cff129c 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -83,7 +83,6 @@ remove_bindings(true, X, Bs) -> {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), - io:format("~p~n", [Paths]), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} -- cgit v1.2.1 From 21343de94bc744bc430bbc2b72d9526c284432a6 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:11:57 +0000 Subject: Use system_info(check_io)/max_fds instead of ulimit. --- src/file_handle_cache.erl | 39 ++++++++++----------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 6f8241b3..de602d39 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -156,13 +156,6 @@ -define(SERVER, ?MODULE). -define(RESERVED_FOR_OTHERS, 100). -%% Googling around suggests that Windows has a limit somewhere around -%% 16M, eg -%% http://blogs.technet.com/markrussinovich/archive/2009/09/29/3283844.aspx -%% however, it turns out that's only available through the win32 -%% API. Via the C Runtime, we have just 512: -%% http://msdn.microsoft.com/en-us/library/6e3b887c%28VS.80%29.aspx --define(FILE_HANDLES_LIMIT_WINDOWS, 512). 
-define(FILE_HANDLES_LIMIT_OTHER, 1024). -define(FILE_HANDLES_CHECK_INTERVAL, 2000). @@ -1185,29 +1178,17 @@ track_client(Pid, Clients) -> false -> ok end. -%% For all unices, assume ulimit exists. Further googling suggests -%% that BSDs (incl OS X), solaris and linux all agree that ulimit -n -%% is file handles + ulimit() -> - case os:type() of - {win32, _OsName} -> - ?FILE_HANDLES_LIMIT_WINDOWS; - {unix, _OsName} -> - %% Under Linux, Solaris and FreeBSD, ulimit is a shell - %% builtin, not a command. In OS X and AIX it's a command. - %% Fortunately, os:cmd invokes the cmd in a shell env, so - %% we're safe in all cases. - case os:cmd("ulimit -n") of - "unlimited" -> - infinity; - String = [C|_] when $0 =< C andalso C =< $9 -> - list_to_integer( - lists:takewhile( - fun (D) -> $0 =< D andalso D =< $9 end, String)); - _ -> - %% probably a variant of - %% "/bin/sh: line 1: ulimit: command not found\n" - unknown + case proplists:get_value(max_fds, erlang:system_info(check_io)) of + MaxFds when is_integer(MaxFds) andalso MaxFds > 1 -> + case os:type() of + {win32, _OsName} -> + %% On Windows max_fds is twice the number of open files: + %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 + MaxFds / 2; + _Any -> + MaxFds end; _ -> unknown -- cgit v1.2.1 From 788d800206742fd0630dc6cfb5d1a2e533c2f416 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:16:35 +0000 Subject: Short comment --- src/file_handle_cache.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index de602d39..f27adfd9 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1179,6 +1179,8 @@ track_client(Pid, Clients) -> end. +%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS +%% environment variable, on Linux set `ulimit -n`. ulimit() -> case proplists:get_value(max_fds, erlang:system_info(check_io)) of MaxFds when is_integer(MaxFds) andalso MaxFds > 1 -> @@ -1188,6 +1190,7 @@ ulimit() -> %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 MaxFds / 2; _Any -> + %% For other operating systems trust Erlang. MaxFds end; _ -> -- cgit v1.2.1 From 4dfbd8dc307d9fe76b7df03bfc101a55faa09837 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:58:10 +0000 Subject: Floats are bad. Use integer division instead. (via Matthias) --- src/file_handle_cache.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index f27adfd9..304f9335 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1188,7 +1188,7 @@ ulimit() -> {win32, _OsName} -> %% On Windows max_fds is twice the number of open files: %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 - MaxFds / 2; + MaxFds div 2; _Any -> %% For other operating systems trust Erlang. MaxFds -- cgit v1.2.1 From b926ae697507a7f61801613107bd90a8e1b226a9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:33:02 +0000 Subject: save a line --- src/rabbit_amqqueue_process.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index b32fa0ff..6c4c8654 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -422,7 +422,7 @@ gb_trees_cons(Key, Value, Tree) -> end. 
record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> - {no_confirm, State}; + {never, State}; record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { @@ -431,10 +431,10 @@ record_confirm_message(#delivery{sender = ChPid, State = #q{msg_id_to_channel = MTC, q = #amqqueue{durable = true}}) -> - {confirm, + {eventually, State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> - {no_confirm, State}. + {immediately, State}. run_message_queue(State) -> Funs = {fun deliver_from_queue_pred/2, @@ -451,10 +451,9 @@ attempt_delivery(#delivery{txn = none, msg_seq_no = MsgSeqNo}, {NeedsConfirming, State = #q{backing_queue = BQ}}) -> %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming - case {NeedsConfirming, MsgSeqNo} of - {_, undefined} -> ok; - {no_confirm, _} -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); - {confirm, _} -> ok + case NeedsConfirming of + immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); + _ -> ok end, PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -466,7 +465,7 @@ attempt_delivery(#delivery{txn = none, BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = (NeedsConfirming =:= confirm)}, + needs_confirming = (NeedsConfirming =:= eventually)}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} @@ -493,7 +492,7 @@ deliver_or_enqueue(Delivery, State) -> BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ needs_confirming = - (NeedsConfirming =:= confirm)}, + (NeedsConfirming =:= eventually)}, BQS), {false, ensure_ttl_timer(State1#q{backing_queue_state = BQS1})} end. -- cgit v1.2.1 From 2115f9d744d058f93556d43c21eb1a66fcdea847 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:35:06 +0000 Subject: save another line. bonus --- src/rabbit_amqqueue_process.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6c4c8654..cfef08a5 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -428,9 +428,8 @@ record_confirm_message(#delivery{sender = ChPid, message = #basic_message { is_persistent = true, id = MsgId}}, - State = - #q{msg_id_to_channel = MTC, - q = #amqqueue{durable = true}}) -> + State = #q{q = #amqqueue{durable = true}, + msg_id_to_channel = MTC}) -> {eventually, State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> -- cgit v1.2.1 From 41ce63c6dab98f8ce0b80bfcd52d86e1a53ef23d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 14:11:11 +0000 Subject: Don't change the version file format --- src/rabbit_upgrade.erl | 49 ++++++++++++++++++------------------------------- 1 file changed, 18 insertions(+), 31 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f59dbdfe..8113bad8 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -240,50 +240,44 @@ maybe_upgrade_local() -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case is_new_version(V) of - false -> {ok, convert_old_version(V)}; - %% Write in this format for future expansion; - %% we want to allow plugins to own upgrades. - true -> [{rabbit, RV}] = V, - {ok, RV} - end; + {ok, [V]} -> {ok, V}; {error, _} = Err -> Err end. 
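Looking back at the file_handle_cache commits a little earlier: the limit that the rewritten ulimit/0 trusts can be inspected directly in an Erlang shell and, as the new comment there notes, raised via ERL_MAX_PORTS on Windows or `ulimit -n` on Linux. The value shown below is only an example; it is entirely system-dependent:

    1> proplists:get_value(max_fds, erlang:system_info(check_io)).
    1024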
read_version(Scope) -> case read_version() of {error, _} = E -> E; - {ok, V} -> {ok, orddict:fetch(Scope, V)} + {ok, V} -> {ok, filter_by_scope(Scope, V)} end. write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), - [[{rabbit, desired_version()}]]), + ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), ok. write_version(Scope) -> {ok, V0} = read_version(), - V = orddict:store(Scope, desired_version(Scope), V0), - ok = rabbit_misc:write_term_file(schema_filename(), [[{rabbit, V}]]), + V = flatten([case S of + Scope -> desired_version(S); + _ -> filter_by_scope(S, V0) + end || S <- ?SCOPES]), + ok = rabbit_misc:write_term_file(schema_filename(), [V]), ok. desired_version() -> - lists:foldl( - fun (Scope, Acc) -> - orddict:store(Scope, desired_version(Scope), Acc) - end, - orddict:new(), ?SCOPES). + flatten([desired_version(Scope) || Scope <- ?SCOPES]). desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). -convert_old_version(Heads) -> - Locals = [add_queue_ttl], - V0 = orddict:new(), - V1 = orddict:store(mnesia, Heads -- Locals, V0), - orddict:store(local, - lists:filter(fun(H) -> lists:member(H, Locals) end, Heads), - V1). +flatten(LoL) -> + lists:sort(lists:flatten(LoL)). + +filter_by_scope(Scope, Versions) -> + with_upgrade_graph( + fun(G) -> + ScopeVs = digraph:vertices(G), + [V || V <- Versions, lists:member(V, ScopeVs)] + end, Scope). %% ------------------------------------------------------------------- @@ -399,10 +393,3 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). - -is_new_version(Version) -> - try - orddict:size(Version) > 0 - catch error:badarg -> - false - end. -- cgit v1.2.1 From b610ff9f65d1289904f34a49a242fe843db094cc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 14:19:40 +0000 Subject: Use lists:append/1. --- src/rabbit_upgrade.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 8113bad8..1284d229 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -270,7 +270,7 @@ desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). flatten(LoL) -> - lists:sort(lists:flatten(LoL)). + lists:sort(lists:append(LoL)). filter_by_scope(Scope, Versions) -> with_upgrade_graph( -- cgit v1.2.1 From 72fabd498be215a5a95927a5448d079bdfb4fe05 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 15:56:23 +0000 Subject: Tweaked accumulator again - no need to store the entire path --- src/rabbit_exchange_type_topic.erl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 7cff129c..65518287 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -85,7 +85,7 @@ remove_bindings(true, X, Bs) -> [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} + {Node, {Parent, W, {0, 0}}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> @@ -101,25 +101,26 @@ maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> decrement_bindings(X, Path, PathAcc) -> with_path_acc(X, - fun({_Path, Bindings, Edges}) -> - {Path, Bindings - 1, Edges} + fun({Bindings, Edges}) -> + {Bindings - 1, Edges} end, Path, PathAcc). 
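The switch from lists:flatten/1 to lists:append/1 just above is safe because the per-scope version lists are only one level deep: append/1 concatenates a list of lists without recursing into the elements, whereas flatten/1 also flattens any nesting inside them. For example, in the shell:

    1> lists:append([[b, a], [c]]).
    [b,a,c]
    2> lists:flatten([[b, [a]], [c]]).
    [b,a,c]
    3> lists:append([[b, [a]], [c]]).
    [b,[a],c]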
decrement_edges(X, Path, PathAcc) -> with_path_acc(X, - fun({_Path, Bindings, Edges}) -> - {Path, Bindings, Edges - 1} + fun({Bindings, Edges}) -> + {Bindings, Edges - 1} end, Path, PathAcc). with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> PathAcc; with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> - NewVal = Fun(gb_trees:get(Node, PathAcc)), - NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), - case NewVal of - {_, 0, 0} -> + {Parent, W, Counts} = gb_trees:get(Node, PathAcc), + NewCounts = Fun(Counts), + NewPathAcc = gb_trees:update(Node, {Parent, W, NewCounts}, PathAcc), + case NewCounts of + {0, 0} -> decrement_edges(X, ParentPath, maybe_add_path(X, ParentPath, NewPathAcc)); _ -> -- cgit v1.2.1 From 291ae6f85aa36cb2dc14f6f864509b82502c71a1 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 16:18:22 +0000 Subject: Fix bad path_entry creation --- src/rabbit_exchange_type_topic.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 65518287..ffab0fcb 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -127,8 +127,8 @@ with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewPathAcc end. -path_entry(X, Path = [{Node, _} | _]) -> - {Path, trie_binding_count(X, Node), trie_child_count(X, Node)}. +path_entry(X, Path = [{Node, W}, {Parent, _} | _]) -> + {Parent, W, {trie_binding_count(X, Node), trie_child_count(X, Node)}}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). -- cgit v1.2.1 From 39db9d66cc5ec7390df89fd815939dd96d0ae903 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 16:25:43 +0000 Subject: Pin the URL comment. --- src/file_handle_cache.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 304f9335..b26bb988 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1187,7 +1187,7 @@ ulimit() -> case os:type() of {win32, _OsName} -> %% On Windows max_fds is twice the number of open files: - %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 + %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466 MaxFds div 2; _Any -> %% For other operating systems trust Erlang. -- cgit v1.2.1 From b52a76575e3e7e875a8d9cababfb9247490f716c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 8 Mar 2011 22:20:43 +0000 Subject: cosmetic changes and some inlining --- src/rabbit_exchange_type_topic.erl | 45 ++++++++++++++------------------------ 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffab0fcb..a7d36533 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -69,48 +69,42 @@ add_binding(false, _Exchange, _Binding) -> remove_bindings(true, X, Bs) -> %% The remove process is split into two distinct phases. In the - %% first phase, we first gather the lists of bindings and edges to + %% first phase we gather the lists of bindings and edges to %% delete, then in the second phase we process all the %% deletions. This is to prevent interleaving of read/write %% operations in mnesia that can adversely affect performance. 
{ToDelete, Paths} = lists:foldl( - fun(B = #binding{destination = D}, {Acc, PathAcc}) -> - Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(X, Path, - maybe_add_path(X, Path, - PathAcc)), - {[{FinalNode, D} | Acc], PathAcc1} + fun(#binding{source = S, key = K, destination = D}, {Acc, PathAcc}) -> + Path = [{FinalNode, _} | _] = + follow_down_get_path(S, split_topic_key(K)), + {[{FinalNode, D} | Acc], + decrement_bindings(X, Path, maybe_add_path(X, Path, PathAcc))} end, {[], gb_trees:empty()}, Bs), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {Node, {Parent, W, {0, 0}}} - <- gb_trees:to_list(Paths)], + {Node, {Parent, W, {0, 0}}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> ok. maybe_add_path(_X, [{root, none}], PathAcc) -> PathAcc; -maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> +maybe_add_path(X, [{Node, W}, {Parent, _} | _], PathAcc) -> case gb_trees:is_defined(Node, PathAcc) of true -> PathAcc; - false -> gb_trees:insert(Node, path_entry(X, Path), PathAcc) + false -> gb_trees:insert(Node, {Parent, W, {trie_binding_count(X, Node), + trie_child_count(X, Node)}}, + PathAcc) end. decrement_bindings(X, Path, PathAcc) -> - with_path_acc(X, - fun({Bindings, Edges}) -> - {Bindings - 1, Edges} - end, + with_path_acc(X, fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, Path, PathAcc). decrement_edges(X, Path, PathAcc) -> - with_path_acc(X, - fun({Bindings, Edges}) -> - {Bindings, Edges - 1} - end, + with_path_acc(X, fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, Path, PathAcc). with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> @@ -120,18 +114,11 @@ with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewCounts = Fun(Counts), NewPathAcc = gb_trees:update(Node, {Parent, W, NewCounts}, PathAcc), case NewCounts of - {0, 0} -> - decrement_edges(X, ParentPath, - maybe_add_path(X, ParentPath, NewPathAcc)); - _ -> - NewPathAcc + {0, 0} -> decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); + _ -> NewPathAcc end. -path_entry(X, Path = [{Node, W}, {Parent, _} | _]) -> - {Parent, W, {trie_binding_count(X, Node), trie_child_count(X, Node)}}. - -binding_path(#binding{source = X, key = K}) -> - follow_down_get_path(X, split_topic_key(K)). assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). -- cgit v1.2.1 From 4fd145f52df6e05353e5b0cfb3b30fb9081a50a7 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 00:14:21 +0000 Subject: close channel when test finishes --- src/rabbit_tests.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 4ad35696..3416fe0d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1290,13 +1290,16 @@ test_confirms() -> after 1000 -> ok end, - %% Delete queue + %% Cleanup rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), receive #'queue.delete_ok'{} -> ok after 1000 -> throw(failed_to_cleanup_queue) end, + unlink(Ch), + ok = rabbit_channel:shutdown(Ch), + passed. test_statistics() -> -- cgit v1.2.1 From 3141efa589d2cb4097e16fd744b1bccf43d6e270 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 09:30:15 +0000 Subject: change if to case Mhm. 
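
Some context for the if-to-case change whose diff follows: an Erlang if clause may only contain guard expressions, so a call such as gb_sets:is_empty/1 has to be evaluated into a variable first, and the fall-through branch is the idiomatic but awkward "true ->". Matching the computed boolean with case keeps the call inline and names both branches. A minimal sketch with invented names:

    should_forget(Nack, Queues) ->
        case Nack orelse gb_sets:is_empty(Queues) of
            true  -> forget;
            false -> keep
        end.
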
--- src/rabbit_channel.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 8afa2d8d..f584ff32 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -548,10 +548,10 @@ remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack, Qs1 = gb_sets:del_element(QPid, Qs), %% If QPid somehow died initiating a nack, clear the message from %% internal data-structures. Also, cleanup empty entries. - Empty = gb_sets:is_empty(Qs1), - if (Empty orelse Nack) -> + case (Nack orelse gb_sets:is_empty(Qs1)) of + true -> {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UMQ), UQM1}; - true -> + false -> {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ), UQM1} end. -- cgit v1.2.1 From 22007275cb3d133e047a291510d716b23fe05dfb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 11:17:27 +0000 Subject: Correct upgrade step --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d1307b85..c75ecf86 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -294,7 +294,7 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). +-rabbit_upgrade({multiple_routing_keys, local, []}). -ifdef(use_specs). -- cgit v1.2.1 From efc937c34e3984ab25a053ff240d8d8e0034f5b6 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 9 Mar 2011 11:31:47 +0000 Subject: Fixed bug in the QPid tests. My tests were passing because I used an exchange name in both add and remove calls --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index a7d36533..ffd1e583 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -67,7 +67,7 @@ add_binding(true, _Exchange, Binding) -> add_binding(false, _Exchange, _Binding) -> ok. -remove_bindings(true, X, Bs) -> +remove_bindings(true, #exchange{name = X}, Bs) -> %% The remove process is split into two distinct phases. 
In the %% first phase we gather the lists of bindings and edges to %% delete, then in the second phase we process all the -- cgit v1.2.1 From a3f01f3123c3c4b5d5ab6353a5121b5a1d5a999c Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 11:53:39 +0000 Subject: remove misleading comment --- src/rabbit_amqqueue_process.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index cfef08a5..89d2e0cb 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -449,7 +449,6 @@ attempt_delivery(#delivery{txn = none, message = Message, msg_seq_no = MsgSeqNo}, {NeedsConfirming, State = #q{backing_queue = BQ}}) -> - %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming case NeedsConfirming of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok -- cgit v1.2.1 From 694a2c6d44074a9541126c1625f6aa8834357272 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 13:22:39 +0000 Subject: save 2 lines: no one cares about the result of deliver_or_enqueue, so don't bother with one --- src/rabbit_amqqueue_process.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 89d2e0cb..54c92dc7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -483,7 +483,7 @@ attempt_delivery(#delivery{txn = Txn, deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of {true, _, State1} -> - {true, State1}; + State1; {false, NeedsConfirming, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, @@ -492,7 +492,7 @@ deliver_or_enqueue(Delivery, State) -> needs_confirming = (NeedsConfirming =:= eventually)}, BQS), - {false, ensure_ttl_timer(State1#q{backing_queue_state = BQS1})} + ensure_ttl_timer(State1#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -822,8 +822,7 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. gen_server2:reply(From, true), - {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), - noreply(NewState); + noreply(deliver_or_enqueue(Delivery, State)); handle_call({commit, Txn, ChPid}, From, State) -> case lookup_ch(ChPid) of @@ -985,8 +984,7 @@ handle_cast(sync_timeout, State) -> handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. - {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), - noreply(NewState); + noreply(deliver_or_enqueue(Delivery, State)); handle_cast({ack, Txn, AckTags, ChPid}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> -- cgit v1.2.1 -- cgit v1.2.1 From 3ec780bc3ee6ab11be8dd920a9c1e2c495176e84 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 11 Mar 2011 11:29:54 +0000 Subject: Take write lock when counting records, reduces the constant factor during delete --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..6dfa1930 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -247,7 +247,7 @@ trie_binding_count(X, Node) -> _ = '_'}). 
count(Table, Match) -> - length(mnesia:match_object(Table, Match, read)). + length(mnesia:match_object(Table, Match, write)). trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, -- cgit v1.2.1 From cebd128e876c49e6d7e91da3ccc10aba1bb3c5b3 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 11 Mar 2011 14:06:21 +0000 Subject: Add timestamps to error_logger messages --- src/rabbit_error_logger.erl | 3 ++- src/rabbit_misc.erl | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 0120f0d6..33dfcef9 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -69,6 +69,7 @@ publish(_Other, _Format, _Data, _State) -> publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, - #'P_basic'{content_type = <<"text/plain">>}, + #'P_basic'{content_type = <<"text/plain">>, + timestamp = rabbit_misc:timestamp()}, list_to_binary(io_lib:format(Format, Data))), ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index e79a58a1..713498c8 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -52,7 +52,7 @@ unlink_and_capture_exit/1]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0]). +-export([now_ms/0, timestamp/0]). -export([lock_file/1]). -export([const_ok/1, const/1]). -export([ntoa/1, ntoab/1]). @@ -190,6 +190,7 @@ {bad_edge, [digraph:vertex()]}), digraph:vertex(), digraph:vertex()})). -spec(now_ms/0 :: () -> non_neg_integer()). +-spec(timestamp/0 ::() -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). -spec(const_ok/1 :: (any()) -> 'ok'). -spec(const/1 :: (A) -> const(A)). @@ -199,6 +200,7 @@ -endif. +-define(EPOCH, {{1970, 1, 1}, {0, 0, 0}}). %%---------------------------------------------------------------------------- method_record_type(Record) -> @@ -791,6 +793,10 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. +timestamp() -> + calendar:datetime_to_gregorian_seconds(erlang:universaltime()) - + calendar:datetime_to_gregorian_seconds(?EPOCH). + module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> -- cgit v1.2.1 From 2eac13788895c688a19e27a30f26001cf489491a Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 11 Mar 2011 15:50:12 +0000 Subject: Backed out changeset 2ac4e46ab7c0 changing the lock kind may have undesirable effects in a concurrent setting. The efficiency gains don't justify taking that risk, at least not without further investigation. --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 6dfa1930..ffd1e583 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -247,7 +247,7 @@ trie_binding_count(X, Node) -> _ = '_'}). count(Table, Match) -> - length(mnesia:match_object(Table, Match, write)). + length(mnesia:match_object(Table, Match, read)). 
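
On the timestamp/0 helper introduced in the error_logger patch above: it yields whole seconds since the Unix epoch, since ?EPOCH is {{1970, 1, 1}, {0, 0, 0}}. A standalone equivalent, with the epoch offset written out as a literal for illustration (62167219200 is what calendar:datetime_to_gregorian_seconds/1 returns for that date):

    unix_seconds() ->
        calendar:datetime_to_gregorian_seconds(erlang:universaltime()) - 62167219200.
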
trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, -- cgit v1.2.1 From 1d60ab309f7de265a4e8cc6ca905685e5ac04af9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 10:48:28 +0000 Subject: Fix --- src/file_handle_cache.erl | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index b26bb988..eed62729 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -970,12 +970,13 @@ queue_fold(Fun, Init, Q) -> filter_pending(Fun, {Count, Queue}) -> {Delta, Queue1} = - queue_fold(fun (Item, {DeltaN, QueueN}) -> - case Fun(Item) of - true -> {DeltaN, queue:in(Item, QueueN)}; - false -> {DeltaN - requested(Item), QueueN} - end - end, {0, queue:new()}, Queue), + queue_fold( + fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) -> + case Fun(Item) of + true -> {DeltaN, queue:in(Item, QueueN)}; + false -> {DeltaN - Requested, QueueN} + end + end, {0, queue:new()}, Queue), {Count + Delta, Queue1}. pending_new() -> @@ -1021,9 +1022,6 @@ adjust_alarm(OldState, NewState) -> end, NewState. -requested({_Kind, _Pid, Requested, _From}) -> - Requested. - process_pending(State = #fhc_state { limit = infinity }) -> State; process_pending(State) -> -- cgit v1.2.1 From 8492c18d696b0d4b384bfdc381d006e421351658 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:30:23 +0000 Subject: Maintain an acceptable level of code quality. --- src/rabbit_amqqueue_process.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 77706117..4f1f50a0 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -217,12 +217,11 @@ noreply(NewState) -> next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> {MsgIds, BQS1} = BQ:drain_confirmed(BQS), - BQNeedsSync = BQ:needs_idle_timeout(BQS1), State1 = ensure_stats_timer( ensure_rate_timer( confirm_messages(MsgIds, State#q{ backing_queue_state = BQS1}))), - case BQNeedsSync of + case BQ:needs_idle_timeout(BQS1) of true -> {ensure_sync_timer(State1), 0}; false -> {stop_sync_timer(State1), hibernate} end. -- cgit v1.2.1 From 09da0e495c4b327511c8cdbc763787fc409bcb81 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:43:15 +0000 Subject: I think this makes it more comprehensible. To me at least. --- src/rabbit_backing_queue.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 73850793..7823a53c 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,8 +39,9 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which can be invoked by the - %% backing queue when an event has occured that requires a + %% 4. an asynchronous callback which can be passed by the + %% backing queue to other processes which need to call back + %% into it when an event has occured that requires a %% state transition. The callback accepts a function from %% state to state. %% 5. a synchronous callback. 
Same as the asynchronous callback -- cgit v1.2.1 From 802d609c24e331f641e7afe7577676f88c22c46a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:51:26 +0000 Subject: I didn't think that implied that it could *only* be used out of process, but let's make it clearer. --- src/rabbit_backing_queue.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 7823a53c..29d9331b 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -42,8 +42,8 @@ behaviour_info(callbacks) -> %% 4. an asynchronous callback which can be passed by the %% backing queue to other processes which need to call back %% into it when an event has occured that requires a - %% state transition. The callback accepts a function from - %% state to state. + %% state transition. Note that it can also be used in process. + %% The callback accepts a function from state to state. %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, -- cgit v1.2.1 From dc873bed6f3634945a8881a88d0d52091018b33d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 12:05:04 +0000 Subject: Adjusted test to hit code path --- src/rabbit_tests.erl | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 9547cae5..c2ed3fb0 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,23 +1628,38 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), + Src = filename:join(TmpDir, "file1"), + Dst = filename:join(TmpDir, "file2"), + Content = <<"foo">>, + CopyFun = fun () -> + ok = file:write_file(Src, Content), + {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), + {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), + Size = size(Content), + {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), + ok = file_handle_cache:delete(SrcHdl), + ok = file_handle_cache:delete(DstHdl) + end, Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( filename:join(TmpDir, "file3"), [write], []), - receive close -> ok end, - file_handle_cache:delete(Hdl) + receive {next, Pid1} -> Pid1 ! {next, self()} end, + file_handle_cache:delete(Hdl), + %% This will block and never return, so we + %% exercise the fhc tidying up the pending + %% queue on the death of a process. + ok = CopyFun() end), - Src = filename:join(TmpDir, "file1"), - Dst = filename:join(TmpDir, "file2"), - Content = <<"foo">>, - ok = file:write_file(Src, Content), - {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), - {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), - Size = size(Content), - {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), - ok = file_handle_cache:delete(SrcHdl), - file_handle_cache:delete(DstHdl), - Pid ! close, + ok = CopyFun(), + ok = file_handle_cache:set_limit(3), + Pid ! {next, self()}, + receive {next, Pid} -> ok end, + erlang:monitor(process, Pid), + timer:sleep(500), + exit(Pid, kill), + receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, + file:delete(Src), + file:delete(Dst), ok = file_handle_cache:set_limit(Limit), passed. 
-- cgit v1.2.1 From 80854415c2e1579d52127b9722c985c77d0791e4 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 12:09:43 +0000 Subject: more docs --- src/rabbit_backing_queue.erl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 29d9331b..a15ff846 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,11 +39,13 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which can be passed by the - %% backing queue to other processes which need to call back - %% into it when an event has occured that requires a - %% state transition. Note that it can also be used in process. - %% The callback accepts a function from state to state. + %% 4. an asynchronous callback which accepts a function from + %% state to state and invokes it with the current backing + %% queue state. This is useful for handling events, e.g. when + %% the backing queue does not have its own process to receive + %% such events, or when the processing of an event results in + %% a state transition the queue logic needs to know about + %% (such as messages getting confirmed). %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, -- cgit v1.2.1 From e4edc17159a885dc118938ccff3ffe5da93160d6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 16:56:52 +0000 Subject: Whoops - add missing catch case which I just hit in testing... --- src/gm.erl | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index fd8d9b77..8cf22581 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -931,6 +931,12 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group) -> prune_or_create_group(Self, GroupName)); Alive -> Left = lists:nth(random:uniform(length(Alive)), Alive), + Handler = + fun () -> + join_group( + Self, GroupName, + record_dead_member_in_group(Left, GroupName)) + end, try case gen_server2:call( Left, {add_on_right, Self}, infinity) of @@ -940,9 +946,10 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group) -> catch exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown -> - join_group( - Self, GroupName, - record_dead_member_in_group(Left, GroupName)) + Handler(); + exit:{{R, _}, _} + when R =:= nodedown; R =:= shutdown -> + Handler() end end end. 
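
To make arguments 4 and 5 of init/5 described above concrete, here is a minimal sketch of how an owning queue process might build the two callbacks before handing them to the backing queue; the module name, message shape and the record_confirms call are invented for illustration and are not taken from these patches:

    -module(bq_callbacks_example).
    -export([make_callbacks/1]).

    %% The async callback ships a state-to-state fun back to the queue
    %% process, which applies it to its current backing-queue state; the
    %% sync variant does the same but waits for the call to complete.
    make_callbacks(QueuePid) ->
        Async = fun (StateFun) ->
                        gen_server2:cast(QueuePid, {run_backing_queue, StateFun})
                end,
        Sync  = fun (StateFun) ->
                        gen_server2:call(QueuePid, {run_backing_queue, StateFun},
                                         infinity)
                end,
        {Async, Sync}.

    %% e.g. a backing queue might later request a state transition with:
    %%   Async(fun (BQS) -> record_confirms(ConfirmedMsgIds, BQS) end)
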
-- cgit v1.2.1 From 0cad8ee6ed090daa4ef510762a7cc24bf7b38a3a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 17:55:47 +0000 Subject: Start the GC before we rebuild the index, and store it in the State --- src/rabbit_msg_store.erl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4f5d2411..1bc4fd6b 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -646,6 +646,15 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit), + {ok, GCPid} = rabbit_msg_store_gc:start_link( + #gc_state { dir = Dir, + index_module = IndexModule, + index_state = IndexState, + file_summary_ets = FileSummaryEts, + file_handles_ets = FileHandlesEts, + msg_store = self() + }), + State = #msstate { dir = Dir, index_module = IndexModule, index_state = IndexState, @@ -657,7 +666,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> sum_valid_data = 0, sum_file_size = 0, pending_gc_completion = orddict:new(), - gc_pid = undefined, + gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts, @@ -680,15 +689,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, Offset} = file_handle_cache:position(CurHdl, Offset), ok = file_handle_cache:truncate(CurHdl), - {ok, GCPid} = rabbit_msg_store_gc:start_link( - #gc_state { dir = Dir, - index_module = IndexModule, - index_state = IndexState, - file_summary_ets = FileSummaryEts, - file_handles_ets = FileHandlesEts, - msg_store = self() - }), - {ok, maybe_compact( State1 #msstate { current_file_handle = CurHdl, gc_pid = GCPid }), hibernate, -- cgit v1.2.1 From a465d0703f0a9269e1a7b9635a7c450ab8ee2e57 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 19:27:20 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 1bc4fd6b..25a20a96 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -689,8 +689,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, Offset} = file_handle_cache:position(CurHdl, Offset), ok = file_handle_cache:truncate(CurHdl), - {ok, maybe_compact( - State1 #msstate { current_file_handle = CurHdl, gc_pid = GCPid }), + {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }), hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
-- cgit v1.2.1 From 9af92c9bf4c5f05d59353206a668c25f2443b7bc Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 19:28:20 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4f5d2411..7cc499d1 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -716,15 +716,15 @@ handle_call(successfully_recovered_state, _From, State) -> reply(State #msstate.successfully_recovered, State); handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, - State = #msstate { dir = Dir, - index_state = IndexState, - index_module = IndexModule, - file_handles_ets = FileHandlesEts, - file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, - cur_file_cache_ets = CurFileCacheEts, - clients = Clients, - gc_pid = GCPid }) -> + State = #msstate { dir = Dir, + index_state = IndexState, + index_module = IndexModule, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts, + clients = Clients, + gc_pid = GCPid }) -> Clients1 = dict:store(CRef, {MsgOnDiskFun, CloseFDsFun}, Clients), reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts}, -- cgit v1.2.1 From 0b06bcaf2eed27870dcdf16e538c88751eee7527 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 23:34:29 +0000 Subject: fix --- src/file_handle_cache.erl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index b26bb988..e8e86c7c 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1149,11 +1149,14 @@ notify_age(CStates, AverageAge) -> end, CStates). notify_age0(Clients, CStates, Required) -> - Notifications = - [CState || CState <- CStates, CState#cstate.callback =/= undefined], - {L1, L2} = lists:split(random:uniform(length(Notifications)), - Notifications), - notify(Clients, Required, L2 ++ L1). + case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of + [] -> + ok; + Notifications -> + {L1, L2} = lists:split(random:uniform(length(Notifications)), + Notifications), + notify(Clients, Required, L2 ++ L1) + end. 
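
The reworked notify_age0 above guards the empty case because random:uniform/1 is only defined for arguments of 1 or more, so taking random:uniform(length(Notifications)) on an empty list crashes. The same rotate-at-a-random-point pattern in isolation, with an invented name:

    rotate_randomly([])   -> [];
    rotate_randomly(List) -> {L1, L2} = lists:split(random:uniform(length(List)), List),
                             L2 ++ L1.
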
notify(_Clients, _Required, []) -> ok; -- cgit v1.2.1 From 6d7121d192dd59108c58574b63db04ec8a34c345 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 23:48:35 +0000 Subject: Ensure we hit both branches of fhc:filter_pending --- src/rabbit_tests.erl | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index d5956c4c..87c905d7 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,10 +1628,12 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), - Src = filename:join(TmpDir, "file1"), - Dst = filename:join(TmpDir, "file2"), + Src1 = filename:join(TmpDir, "file1"), + Dst1 = filename:join(TmpDir, "file2"), + Src2 = filename:join(TmpDir, "file3"), + Dst2 = filename:join(TmpDir, "file4"), Content = <<"foo">>, - CopyFun = fun () -> + CopyFun = fun (Src, Dst) -> ok = file:write_file(Src, Content), {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), @@ -1648,18 +1650,22 @@ test_file_handle_cache() -> %% This will block and never return, so we %% exercise the fhc tidying up the pending %% queue on the death of a process. - ok = CopyFun() + ok = CopyFun(Src1, Dst1) end), - ok = CopyFun(), - ok = file_handle_cache:set_limit(3), + ok = CopyFun(Src1, Dst1), + ok = file_handle_cache:set_limit(2), Pid ! {next, self()}, receive {next, Pid} -> ok end, + timer:sleep(100), + Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end), + timer:sleep(100), erlang:monitor(process, Pid), - timer:sleep(500), + erlang:monitor(process, Pid1), exit(Pid, kill), + exit(Pid1, kill), receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, - file:delete(Src), - file:delete(Dst), + receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end, + [file:delete(File) || File <- [Src1, Dst1, Src2, Dst2]], ok = file_handle_cache:set_limit(Limit), passed. -- cgit v1.2.1 From cc5413d3b863c14ccca00b2a4feb9c99589da170 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Mar 2011 17:00:28 +0000 Subject: Read segment files in one go --- src/rabbit_queue_index.erl | 79 ++++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 8227e4cd..d3a82fbf 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -545,20 +545,22 @@ expiry_to_binary(Expiry) -> <>. read_pub_record_body(Hdl) -> case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of - {ok, Bin} -> - %% work around for binary data fragmentation. See - %% rabbit_msg_file:read_next/2 - <> = Bin, - <> = <>, - Exp = case Expiry of - ?NO_EXPIRY -> undefined; - X -> X - end, - {MsgId, #message_properties{expiry = Exp}}; - Error -> - Error + {ok, Bin} -> {MsgId, MsgProps, <<>>} = extract_pub_record_body(Bin), + {MsgId, MsgProps}; + Error -> Error end. +extract_pub_record_body(<>) -> + %% work around for binary data fragmentation. See + %% rabbit_msg_file:read_next/2 + <> = <>, + Exp = case Expiry of + ?NO_EXPIRY -> undefined; + X -> X + end, + {MsgId, #message_properties{expiry = Exp}, Rest}. 
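
The point of the "Read segment files in one go" patch above is to replace one file_handle_cache:read/2 call per entry with a single read of the whole segment, after which everything is decoded by matching on the in-memory binary. The shape of such a decoder is easiest to see on a toy fixed-size record layout (invented here, far simpler than the real segment encoding):

    %% Decode {Id, Expiry} pairs from one buffer; no further file reads
    %% are needed once the segment is in memory.
    parse_records(<<Id:128, Expiry:64, Rest/binary>>, Acc) ->
        parse_records(Rest, [{Id, Expiry} | Acc]);
    parse_records(_TrailingBytes, Acc) ->
        lists:reverse(Acc).
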
+ %%---------------------------------------------------------------------------- %% journal manipulation %%---------------------------------------------------------------------------- @@ -845,36 +847,37 @@ load_segment(KeepAcked, #segment { path = Path }) -> false -> {array_new(), 0}; true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), - Res = load_segment_entries(KeepAcked, Hdl, array_new(), 0), + {ok, SegData} = file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE), + Res = load_segment_entries(KeepAcked, SegData, array_new(), 0), ok = file_handle_cache:close(Hdl), Res end. -load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> - case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of - {ok, <>} -> - {MsgId, MsgProps} = read_pub_record_body(Hdl), - Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, - SegEntries1 = array:set(RelSeq, Obj, SegEntries), - load_segment_entries(KeepAcked, Hdl, SegEntries1, - UnackedCount + 1); - {ok, <>} -> - {UnackedCountDelta, SegEntries1} = - case array:get(RelSeq, SegEntries) of - {Pub, no_del, no_ack} -> - { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)}; - {Pub, del, no_ack} when KeepAcked -> - {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)}; - {_Pub, del, no_ack} -> - {-1, array:reset(RelSeq, SegEntries)} - end, - load_segment_entries(KeepAcked, Hdl, SegEntries1, - UnackedCount + UnackedCountDelta); - _ErrOrEoF -> - {SegEntries, UnackedCount} - end. +load_segment_entries(KeepAcked, + <>, + SegEntries, UnackedCount) -> + {MsgId, MsgProps, SegData1} = extract_pub_record_body(SegData), + Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, + SegEntries1 = array:set(RelSeq, Obj, SegEntries), + load_segment_entries(KeepAcked, SegData1, SegEntries1, UnackedCount + 1); +load_segment_entries(KeepAcked, + <>, + SegEntries, UnackedCount) -> + {UnackedCountDelta, SegEntries1} = + case array:get(RelSeq, SegEntries) of + {Pub, no_del, no_ack} -> + { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)}; + {Pub, del, no_ack} when KeepAcked -> + {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)}; + {_Pub, del, no_ack} -> + {-1, array:reset(RelSeq, SegEntries)} + end, + load_segment_entries(KeepAcked, SegData, SegEntries1, + UnackedCount + UnackedCountDelta); +load_segment_entries(_KeepAcked, _SegData, SegEntries, UnackedCount) -> + {SegEntries, UnackedCount}. array_new() -> array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]). -- cgit v1.2.1 From 2919b96d6576a0fa2bbe9c075d200c3061027850 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 17:21:19 +0000 Subject: cosmetic --- src/file_handle_cache.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index e8e86c7c..4f036571 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1150,12 +1150,10 @@ notify_age(CStates, AverageAge) -> notify_age0(Clients, CStates, Required) -> case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of - [] -> - ok; - Notifications -> - {L1, L2} = lists:split(random:uniform(length(Notifications)), - Notifications), - notify(Clients, Required, L2 ++ L1) + [] -> ok; + Notifications -> S = random:uniform(length(Notifications)), + {L1, L2} = lists:split(S, Notifications), + notify(Clients, Required, L2 ++ L1) end. 
notify(_Clients, _Required, []) -> -- cgit v1.2.1 From 0e03d63fa6b9236744374041738f9c59182be325 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 19:26:15 +0000 Subject: initialise #connection.capabilities - not strictly necessary, but good form --- src/rabbit_reader.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 5afe5560..609bb43f 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -201,7 +201,8 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, timeout_sec = ?HANDSHAKE_TIMEOUT, frame_max = ?FRAME_MIN_SIZE, vhost = none, - client_properties = none}, + client_properties = none, + capabilities = []}, callback = uninitialized_callback, recv_length = 0, recv_ref = none, -- cgit v1.2.1 From eb0205d4bc8408d63736b13a6f7e6f5ba2879eee Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 20:00:56 +0000 Subject: inlining and some minor refactoring --- src/rabbit_queue_index.erl | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index d3a82fbf..c342101c 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -543,13 +543,6 @@ create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. -read_pub_record_body(Hdl) -> - case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of - {ok, Bin} -> {MsgId, MsgProps, <<>>} = extract_pub_record_body(Bin), - {MsgId, MsgProps}; - Error -> Error - end. - extract_pub_record_body(<>) -> %% work around for binary data fragmentation. See @@ -682,15 +675,18 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> ?ACK_JPREFIX -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> - case read_pub_record_body(Hdl) of - {MsgId, MsgProps} -> - Publish = {MsgId, MsgProps, - case Prefix of - ?PUB_PERSIST_JPREFIX -> true; - ?PUB_TRANS_JPREFIX -> false - end}, + case file_handle_cache:read( + Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of + {ok, Bin} -> + {MsgId, MsgProps, <<>>} = + extract_pub_record_body(Bin), + IsPersistent = case Prefix of + ?PUB_PERSIST_JPREFIX -> true; + ?PUB_TRANS_JPREFIX -> false + end, load_journal_entries( - add_to_journal(SeqId, Publish, State)); + add_to_journal( + SeqId, {MsgId, MsgProps, IsPersistent}, State)); _ErrOrEoF -> %% err, we've lost at least a publish State end -- cgit v1.2.1 From 11681ae6031b69432626e7d92a699b07dd021c95 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 07:18:40 +0000 Subject: cosmetic --- src/rabbit_auth_backend_internal.erl | 24 ++++++++++-------------- src/rabbit_auth_mechanism_cr_demo.erl | 12 +++++------- src/rabbit_auth_mechanism_plain.erl | 5 +---- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index 3d005845..f70813d1 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -85,10 +85,9 @@ check_user_login(Username, []) -> internal_check_user_login(Username, fun(_) -> true end); check_user_login(Username, [{password, Password}]) -> internal_check_user_login( - Username, - fun(#internal_user{password_hash = Hash}) -> - check_password(Password, Hash) - end); + Username, fun(#internal_user{password_hash = Hash}) -> + check_password(Password, Hash) + end); check_user_login(Username, 
AuthProps) -> exit({unknown_auth_props, Username, AuthProps}). @@ -131,12 +130,11 @@ check_resource_access(#user{username = Username}, [] -> false; [#user_permission{permission = P}] -> - PermRegexp = - case element(permission_index(Permission), P) of - %% <<"^$">> breaks Emacs' erlang mode - <<"">> -> <<$^, $$>>; - RE -> RE - end, + PermRegexp = case element(permission_index(Permission), P) of + %% <<"^$">> breaks Emacs' erlang mode + <<"">> -> <<$^, $$>>; + RE -> RE + end, case re:run(Name, PermRegexp, [{capture, none}]) of match -> true; nomatch -> false @@ -221,11 +219,9 @@ salted_md5(Salt, Cleartext) -> Salted = <>, erlang:md5(Salted). -set_admin(Username) -> - set_admin(Username, true). +set_admin(Username) -> set_admin(Username, true). -clear_admin(Username) -> - set_admin(Username, false). +clear_admin(Username) -> set_admin(Username, false). set_admin(Username, IsAdmin) -> R = update_user(Username, fun(User) -> diff --git a/src/rabbit_auth_mechanism_cr_demo.erl b/src/rabbit_auth_mechanism_cr_demo.erl index 77aa34ea..acbb6e48 100644 --- a/src/rabbit_auth_mechanism_cr_demo.erl +++ b/src/rabbit_auth_mechanism_cr_demo.erl @@ -53,10 +53,8 @@ handle_response(Response, State = #state{username = undefined}) -> {challenge, <<"Please tell me your password">>, State#state{username = Response}}; -handle_response(Response, #state{username = Username}) -> - case Response of - <<"My password is ", Password/binary>> -> - rabbit_access_control:check_user_pass_login(Username, Password); - _ -> - {protocol_error, "Invalid response '~s'", [Response]} - end. +handle_response(<<"My password is ", Password/binary>>, + #state{username = Username}) -> + rabbit_access_control:check_user_pass_login(Username, Password); +handle_response(Response, _State) -> + {protocol_error, "Invalid response '~s'", [Response]}. diff --git a/src/rabbit_auth_mechanism_plain.erl b/src/rabbit_auth_mechanism_plain.erl index e2f9bff9..2448acb6 100644 --- a/src/rabbit_auth_mechanism_plain.erl +++ b/src/rabbit_auth_mechanism_plain.erl @@ -65,15 +65,12 @@ extract_user_pass(Response) -> end. extract_elem(<<0:8, Rest/binary>>) -> - Count = next_null_pos(Rest), + Count = next_null_pos(Rest, 0), <> = Rest, {ok, Elem, Rest1}; extract_elem(_) -> error. -next_null_pos(Bin) -> - next_null_pos(Bin, 0). - next_null_pos(<<>>, Count) -> Count; next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count; next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1). 
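
For reference on what the PLAIN mechanism above is pulling apart: the client's response is <<0, Username/binary, 0, Password/binary>> (an empty authorization identity, then the two NUL-separated fields), which is why extract_elem/1 insists on a leading zero byte and then scans for the next one. A shell illustration with invented credentials; binary:split/3 is used here only to show the framing, not as the parser the mechanism actually uses:

    1> Response = <<0, "guest", 0, "guest">>.
    <<0,103,117,101,115,116,0,103,117,101,115,116>>
    2> binary:split(Response, <<0>>, [global]).
    [<<>>,<<"guest">>,<<"guest">>]
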
-- cgit v1.2.1 From 8422782861d5a7bef197d048f0b59da14b516d37 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 07:44:29 +0000 Subject: cosmetic --- src/rabbit_queue_index.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index c342101c..75423f80 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -843,7 +843,8 @@ load_segment(KeepAcked, #segment { path = Path }) -> false -> {array_new(), 0}; true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), - {ok, SegData} = file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE), + {ok, SegData} = file_handle_cache:read( + Hdl, ?SEGMENT_TOTAL_SIZE), Res = load_segment_entries(KeepAcked, SegData, array_new(), 0), ok = file_handle_cache:close(Hdl), Res -- cgit v1.2.1 From de97e192c944e6d4e1d1917808f41a5ede61e642 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 09:06:46 +0000 Subject: avoid sub-binary construction of tail and some cosmetics --- src/rabbit_queue_index.erl | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 75423f80..83079ca8 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -140,8 +140,11 @@ -define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes -define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). -%% 16 bytes for md5sum + 8 for expiry + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, ?MSG_ID_BYTES + ?EXPIRY_BYTES + 2). + +%% 16 bytes for md5sum + 8 for expiry +-define(PUBLISH_RECORD_BODY_LENGTH_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). +%% + 2 for seq, bits and prefix +-define(PUBLISH_RECORD_LENGTH_BYTES, (?PUBLISH_RECORD_BODY_LENGTH_BYTES + 2)). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * @@ -537,14 +540,13 @@ queue_index_walker_reader(QueueName, Gatherer) -> %% expiry/binary manipulation %%---------------------------------------------------------------------------- -create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> +create_pub_record_body(MsgId, #message_properties { expiry = Expiry }) -> [MsgId, expiry_to_binary(Expiry)]. expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. -extract_pub_record_body(<>) -> +parse_pub_record_body(<>) -> %% work around for binary data fragmentation. See %% rabbit_msg_file:read_next/2 <> = <>, @@ -552,7 +554,7 @@ extract_pub_record_body(< undefined; X -> X end, - {MsgId, #message_properties{expiry = Exp}, Rest}. + {MsgId, #message_properties { expiry = Exp }}. 
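
Putting numbers on the publish record body handled above: it is the 16-byte md5-based message id followed by the 8-byte expiry field, 24 bytes in total, which is exactly what parse_pub_record_body/1 matches; the full publish record adds 2 more bytes for the prefix, persistence bit and relative sequence number. A quick shell check (payload invented):

    1> MsgId = erlang:md5(<<"example payload">>), bit_size(MsgId).
    128
    2> byte_size(<<MsgId/binary, 0:64>>).    % message id ++ expiry
    24
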
%%---------------------------------------------------------------------------- %% journal manipulation @@ -676,10 +678,9 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> case file_handle_cache:read( - Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of + Hdl, ?PUBLISH_RECORD_BODY_LENGTH_BYTES) of {ok, Bin} -> - {MsgId, MsgProps, <<>>} = - extract_pub_record_body(Bin), + {MsgId, MsgProps} = parse_pub_record_body(Bin), IsPersistent = case Prefix of ?PUB_PERSIST_JPREFIX -> true; ?PUB_TRANS_JPREFIX -> false @@ -852,12 +853,14 @@ load_segment(KeepAcked, #segment { path = Path }) -> load_segment_entries(KeepAcked, <>, + RelSeq:?REL_SEQ_BITS, + PubRecordBody:?PUBLISH_RECORD_BODY_LENGTH_BYTES/binary, + SegData/binary>>, SegEntries, UnackedCount) -> - {MsgId, MsgProps, SegData1} = extract_pub_record_body(SegData), + {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody), Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), - load_segment_entries(KeepAcked, SegData1, SegEntries1, UnackedCount + 1); + load_segment_entries(KeepAcked, SegData, SegEntries1, UnackedCount + 1); load_segment_entries(KeepAcked, <>, -- cgit v1.2.1 From 7108a811678ee47166482095d7715a7196ed413f Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 09:20:37 +0000 Subject: cosmetic: more consistent naming of constants --- src/rabbit_queue_index.erl | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 83079ca8..33c5391b 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -126,13 +126,13 @@ %% (range: 0 - 16383) -define(REL_SEQ_ONLY_PREFIX, 00). -define(REL_SEQ_ONLY_PREFIX_BITS, 2). --define(REL_SEQ_ONLY_ENTRY_LENGTH_BYTES, 2). +-define(REL_SEQ_ONLY_RECORD_BYTES, 2). %% publish record is binary 1 followed by a bit for is_persistent, %% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits %% of md5sum msg id --define(PUBLISH_PREFIX, 1). --define(PUBLISH_PREFIX_BITS, 1). +-define(PUB_PREFIX, 1). +-define(PUB_PREFIX_BITS, 1). -define(EXPIRY_BYTES, 8). -define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)). @@ -142,14 +142,13 @@ -define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). %% 16 bytes for md5sum + 8 for expiry --define(PUBLISH_RECORD_BODY_LENGTH_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). +-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). %% + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, (?PUBLISH_RECORD_BODY_LENGTH_BYTES + 2)). +-define(PUB_RECORD_BYTES, (?PUB_RECORD_BODY_BYTES + 2)). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * - (?PUBLISH_RECORD_LENGTH_BYTES + - (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). + (?PUB_RECORD_BYTES + (2 * ?REL_SEQ_ONLY_RECORD_BYTES))). 
%% ---- misc ---- @@ -677,8 +676,7 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> ?ACK_JPREFIX -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> - case file_handle_cache:read( - Hdl, ?PUBLISH_RECORD_BODY_LENGTH_BYTES) of + case file_handle_cache:read(Hdl, ?PUB_RECORD_BODY_BYTES) of {ok, Bin} -> {MsgId, MsgProps} = parse_pub_record_body(Bin), IsPersistent = case Prefix of @@ -797,7 +795,7 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> ok; {MsgId, MsgProps, IsPersistent} -> file_handle_cache:append( - Hdl, [<>, create_pub_record_body(MsgId, MsgProps)]) @@ -852,9 +850,9 @@ load_segment(KeepAcked, #segment { path = Path }) -> end. load_segment_entries(KeepAcked, - <>, SegEntries, UnackedCount) -> {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody), @@ -1011,11 +1009,11 @@ add_queue_ttl_journal(< stop. -add_queue_ttl_segment(<>) -> - {[<>, MsgId, expiry_to_binary(undefined)], Rest}; + {[<>, + MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_segment(<>) -> {<>, -- cgit v1.2.1 From ade1d061c1f3ac97a02324f121e8ac1b03311ff5 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 16 Mar 2011 10:58:31 +0000 Subject: don't record anything confirm-related for immediate unroutable messages --- src/rabbit_amqqueue_process.erl | 76 +++++++++++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7c4b5190..5dbc8828 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -439,19 +439,24 @@ gb_trees_cons(Key, Value, Tree) -> none -> gb_trees:insert(Key, [Value], Tree) end. -record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> - {never, State}; -record_confirm_message(#delivery{sender = ChPid, +should_confirm_message(#delivery{msg_seq_no = undefined}, _State) -> + never; +should_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, id = MsgId}}, - State = #q{q = #amqqueue{durable = true}, - msg_id_to_channel = MTC}) -> - {eventually, - State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; -record_confirm_message(_Delivery, State) -> - {immediately, State}. + #q{q = #amqqueue{durable = true}}) -> + {eventually, ChPid, MsgSeqNo, MsgId}; +should_confirm_message(_Delivery, _State) -> + immediately. + +record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, + State = #q{msg_id_to_channel = MTC}) -> + State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; +record_confirm_message(Confirm, State) + when Confirm =:= immediately orelse Confirm =:= never -> + State. run_message_queue(State) -> Funs = {fun deliver_from_queue_pred/2, @@ -466,8 +471,9 @@ attempt_delivery(#delivery{txn = none, sender = ChPid, message = Message, msg_seq_no = MsgSeqNo}, - {NeedsConfirming, State = #q{backing_queue = BQ}}) -> - case NeedsConfirming of + Confirm, + State = #q{backing_queue = BQ}) -> + case Confirm of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, @@ -477,40 +483,50 @@ attempt_delivery(#delivery{txn = none, %% we don't need an expiry here because messages are %% not being enqueued, so we use an empty %% message_properties. 
+ NeedsConfirming = case Confirm of + {eventually, _, _, _} -> true; + _ -> false + end, {AckTag, BQS1} = BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = (NeedsConfirming =:= eventually)}, + needs_confirming = NeedsConfirming}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} end, {Delivered, State1} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), - {Delivered, NeedsConfirming, State1}; + {Delivered, Confirm, State1}; attempt_delivery(#delivery{txn = Txn, sender = ChPid, message = Message}, - {NeedsConfirming, State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + Confirm, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), - {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}. + {true, Confirm, State#q{backing_queue_state = BQS1}}. deliver_or_enqueue(Delivery, State) -> - case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of - {true, _, State1} -> - State1; - {false, NeedsConfirming, State1 = #q{backing_queue = BQ, + case attempt_delivery(Delivery, + should_confirm_message(Delivery, State), State) of + {true, Confirm, State1} -> + record_confirm_message(Confirm, State1); + {false, Confirm, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, + NeedsConfirming = case Confirm of + {eventually, _, _, _} -> true; + _ -> false + end, BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ - needs_confirming = - (NeedsConfirming =:= eventually)}, + needs_confirming = NeedsConfirming}, BQS), - ensure_ttl_timer(State1#q{backing_queue_state = BQS1}) + State2 = record_confirm_message(Confirm, State1), + ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -829,9 +845,17 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Delivered, _NeedsConfirming, State1} = - attempt_delivery(Delivery, record_confirm_message(Delivery, State)), - reply(Delivered, State1); + {Delivered, Confirm, State1} = + attempt_delivery(Delivery, + should_confirm_message(Delivery, State), + State), + State2 = case {Confirm, Delivered} of + {{eventually, _, _, _}, true} -> + record_confirm_message(Confirm, State); + _ -> + State1 + end, + reply(Delivered, State2); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 157d3f401c729df060b32327385211b27a0e0105 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 16 Mar 2011 11:19:17 +0000 Subject: refactor --- src/rabbit_amqqueue_process.erl | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 5dbc8828..96352c13 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -451,6 +451,9 @@ should_confirm_message(#delivery{sender = ChPid, should_confirm_message(_Delivery, _State) -> immediately. +needs_confirming({eventually, _, _, _}) -> true; +needs_confirming(_) -> false. 
+ record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, State = #q{msg_id_to_channel = MTC}) -> State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; @@ -483,15 +486,11 @@ attempt_delivery(#delivery{txn = none, %% we don't need an expiry here because messages are %% not being enqueued, so we use an empty %% message_properties. - NeedsConfirming = case Confirm of - {eventually, _, _, _} -> true; - _ -> false - end, {AckTag, BQS1} = BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = NeedsConfirming}, + needs_confirming = needs_confirming(Confirm)}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} @@ -517,13 +516,9 @@ deliver_or_enqueue(Delivery, State) -> {false, Confirm, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, - NeedsConfirming = case Confirm of - {eventually, _, _, _} -> true; - _ -> false - end, BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ - needs_confirming = NeedsConfirming}, + needs_confirming = needs_confirming(Confirm)}, BQS), State2 = record_confirm_message(Confirm, State1), ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) @@ -849,11 +844,9 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> attempt_delivery(Delivery, should_confirm_message(Delivery, State), State), - State2 = case {Confirm, Delivered} of - {{eventually, _, _, _}, true} -> - record_confirm_message(Confirm, State); - _ -> - State1 + State2 = case Delivered andalso needs_confirming(Confirm) of + true -> record_confirm_message(Confirm, State); + false -> State1 end, reply(Delivered, State2); -- cgit v1.2.1 From a6586a1c333ce2499a787c07ec7cb2c8a2cfc180 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 12:58:11 +0000 Subject: Transplant reader_pid vs connection_pid work from bug23350. --- src/rabbit_channel.erl | 39 ++++++++++++++++++++------------------- src/rabbit_channel_sup.erl | 15 ++++++++------- src/rabbit_direct.erl | 14 +++++++------- 3 files changed, 35 insertions(+), 33 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index da103284..b27f6886 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -20,7 +20,7 @@ -behaviour(gen_server2). --export([start_link/9, do/2, do/3, flush/1, shutdown/1]). +-export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). -export([emit_stats/1, ready_for_close/1]). @@ -29,9 +29,9 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2]). --record(ch, {state, protocol, channel, reader_pid, writer_pid, limiter_pid, - start_limiter_fun, transaction_id, tx_participants, next_tag, - uncommitted_ack_q, unacked_message_q, +-record(ch, {state, protocol, channel, reader_pid, writer_pid, connection_pid, + limiter_pid, start_limiter_fun, transaction_id, tx_participants, + next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq, @@ -67,8 +67,8 @@ -type(channel_number() :: non_neg_integer()). 
--spec(start_link/9 :: - (channel_number(), pid(), pid(), rabbit_types:protocol(), +-spec(start_link/10 :: + (channel_number(), pid(), pid(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). @@ -96,11 +96,11 @@ %%---------------------------------------------------------------------------- -start_link(Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, - CollectorPid, StartLimiterFun) -> +start_link(Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, + Capabilities, CollectorPid, StartLimiterFun) -> gen_server2:start_link( - ?MODULE, [Channel, ReaderPid, WriterPid, Protocol, User, VHost, - Capabilities, CollectorPid, StartLimiterFun], []). + ?MODULE, [Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, + VHost, Capabilities, CollectorPid, StartLimiterFun], []). do(Pid, Method) -> do(Pid, Method, none). @@ -154,8 +154,8 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, - CollectorPid, StartLimiterFun]) -> +init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, + Capabilities, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), @@ -164,6 +164,7 @@ init([Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, channel = Channel, reader_pid = ReaderPid, writer_pid = WriterPid, + connection_pid = ConnectionPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, transaction_id = none, @@ -1410,13 +1411,13 @@ coalesce_and_send(MsgSeqNos, MkMsgFun, infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. 
-i(pid, _) -> self(); -i(connection, #ch{reader_pid = ReaderPid}) -> ReaderPid; -i(number, #ch{channel = Channel}) -> Channel; -i(user, #ch{user = User}) -> User#user.username; -i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; -i(confirm, #ch{confirm_enabled = CE}) -> CE; +i(pid, _) -> self(); +i(connection, #ch{connection_pid = Connection}) -> Connection; +i(number, #ch{channel = Channel}) -> Channel; +i(user, #ch{user = User}) -> User#user.username; +i(vhost, #ch{virtual_host = VHost}) -> VHost; +i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; +i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 8175ad80..7eec0818 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -58,21 +58,22 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, User, VHost, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ReaderPid, WriterPid, Protocol, User, VHost, - Capabilities, Collector, start_limiter_fun(SupPid)]}, + [Channel, ReaderPid, WriterPid, ReaderPid, Protocol, + User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Channel, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}) -> +start_link({direct, Channel, ClientChannelPid, ConnectionPid, Protocol, User, + VHost, Capabilities, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, Protocol, - User, VHost, Capabilities, Collector, - start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, + ConnectionPid, Protocol, User, VHost, Capabilities, + Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index a2693c69..568cbea3 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/4, start_channel/7]). +-export([boot/0, connect/4, start_channel/8]). -include("rabbit.hrl"). @@ -28,8 +28,8 @@ -spec(connect/4 :: (binary(), binary(), binary(), rabbit_types:protocol()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). --spec(start_channel/7 :: - (rabbit_channel:channel_number(), pid(), rabbit_types:protocol(), +-spec(start_channel/8 :: + (rabbit_channel:channel_number(), pid(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}). @@ -69,11 +69,11 @@ connect(Username, Password, VHost, Protocol) -> {error, broker_not_found_on_node} end. 
-start_channel(Number, ClientChannelPid, Protocol, User, VHost, Capabilities, - Collector) -> +start_channel(Number, ClientChannelPid, ConnectionPid, Protocol, User, VHost, + Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}]), + [{direct, Number, ClientChannelPid, ConnectionPid, Protocol, User, + VHost, Capabilities, Collector}]), {ok, ChannelPid}. -- cgit v1.2.1 From ff296ce8d6523280e25dbcb81b3fc82bdcaf7bb5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:12:37 +0000 Subject: Use the correct connection pid for exclusivity (and error logging). --- src/rabbit_channel.erl | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b27f6886..19b2eaf4 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -362,14 +362,15 @@ return_ok(State, false, Msg) -> {reply, Msg, State}. ok_msg(true, _Msg) -> undefined; ok_msg(false, Msg) -> Msg. -send_exception(Reason, State = #ch{protocol = Protocol, - channel = Channel, - writer_pid = WriterPid, - reader_pid = ReaderPid}) -> +send_exception(Reason, State = #ch{protocol = Protocol, + channel = Channel, + writer_pid = WriterPid, + reader_pid = ReaderPid, + connection_pid = ConnectionPid}) -> {CloseChannel, CloseMethod} = rabbit_binary_generator:map_exception(Channel, Reason, Protocol), rabbit_log:error("connection ~p, channel ~p - error:~n~p~n", - [ReaderPid, Channel, Reason]), + [ConnectionPid, Channel, Reason]), %% something bad's happened: rollback_and_notify may not be 'ok' {_Result, State1} = rollback_and_notify(State), case CloseChannel of @@ -650,13 +651,13 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{writer_pid = WriterPid, - reader_pid = ReaderPid, - next_tag = DeliveryTag}) -> + _, State = #ch{writer_pid = WriterPid, + connection_pid = ConnectionPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:basic_get(Q, self(), NoAck) end) of {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, @@ -690,7 +691,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid, + _, State = #ch{connection_pid = ConnectionPid, limiter_pid = LimiterPid, consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of @@ -707,7 +708,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, %% behalf. This is for symmetry with basic.cancel - see %% the comment in that method for why. 
case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> {rabbit_amqqueue:basic_consume( Q, NoAck, self(), LimiterPid, @@ -922,10 +923,10 @@ handle_method(#'queue.declare'{queue = QueueNameBin, nowait = NoWait, arguments = Args} = Declare, _, State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid, + connection_pid = ConnectionPid, queue_collector_pid = CollectorPid}) -> Owner = case ExclusiveDeclare of - true -> ReaderPid; + true -> ConnectionPid; false -> none end, ActualNameBin = case QueueNameBin of @@ -967,14 +968,14 @@ handle_method(#'queue.declare'{queue = QueueNameBin, handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, - _, State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid}) -> + _, State = #ch{virtual_host = VHostPath, + connection_pid = ConnectionPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = rabbit_amqqueue:with_or_die( QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), - ok = rabbit_amqqueue:check_exclusive_access(Q, ReaderPid), + ok = rabbit_amqqueue:check_exclusive_access(Q, ConnectionPid), return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount, State); @@ -982,11 +983,11 @@ handle_method(#'queue.delete'{queue = QueueNameBin, if_unused = IfUnused, if_empty = IfEmpty, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid}) -> + _, State = #ch{connection_pid = ConnectionPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_configure_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of {error, in_use} -> rabbit_misc:protocol_error( @@ -1018,11 +1019,11 @@ handle_method(#'queue.unbind'{queue = QueueNameBin, handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid}) -> + _, State = #ch{connection_pid = ConnectionPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:purge(Q) end), return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); @@ -1142,8 +1143,8 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, - State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid}) -> + State = #ch{virtual_host = VHostPath, + connection_pid = ConnectionPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1159,7 +1160,8 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, key = ActualRoutingKey, args = Arguments}, fun (_X, Q = #amqqueue{}) -> - try rabbit_amqqueue:check_exclusive_access(Q, ReaderPid) + try rabbit_amqqueue:check_exclusive_access(Q, + ConnectionPid) catch exit:Reason -> {error, Reason} end; (_X, #exchange{}) -> -- cgit v1.2.1 From a65e7a57cb8f5bf4c4fe562d3ac3dae2a06f7ffd Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:15:51 +0000 Subject: Try to make this branch vertical space neutral. 
Sadly we can't do this by planting a vertical space tree. --- src/rabbit_channel.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 19b2eaf4..370654a9 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1144,7 +1144,7 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid }) -> + connection_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1160,8 +1160,7 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, key = ActualRoutingKey, args = Arguments}, fun (_X, Q = #amqqueue{}) -> - try rabbit_amqqueue:check_exclusive_access(Q, - ConnectionPid) + try rabbit_amqqueue:check_exclusive_access(Q, ConnPid) catch exit:Reason -> {error, Reason} end; (_X, #exchange{}) -> -- cgit v1.2.1 From 8c14e112021a54adcb32365f40f54c0ae766487e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:19:03 +0000 Subject: minor refactor of test --- src/rabbit_tests.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 87c905d7..505570e2 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,10 +1628,8 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), - Src1 = filename:join(TmpDir, "file1"), - Dst1 = filename:join(TmpDir, "file2"), - Src2 = filename:join(TmpDir, "file3"), - Dst2 = filename:join(TmpDir, "file4"), + [Src1, Dst1, Src2, Dst2] = Files = + [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]], Content = <<"foo">>, CopyFun = fun (Src, Dst) -> ok = file:write_file(Src, Content), @@ -1643,7 +1641,7 @@ test_file_handle_cache() -> ok = file_handle_cache:delete(DstHdl) end, Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( - filename:join(TmpDir, "file3"), + filename:join(TmpDir, "file5"), [write], []), receive {next, Pid1} -> Pid1 ! {next, self()} end, file_handle_cache:delete(Hdl), @@ -1665,7 +1663,7 @@ test_file_handle_cache() -> exit(Pid1, kill), receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end, - [file:delete(File) || File <- [Src1, Dst1, Src2, Dst2]], + [file:delete(File) || File <- Files], ok = file_handle_cache:set_limit(Limit), passed. -- cgit v1.2.1 From 7556d6ae1e71ffc07dab7666216e94bbd91c1dec Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:36:31 +0000 Subject: refactorings --- src/rabbit_amqqueue_process.erl | 56 +++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 96352c13..4ebdb7a3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -454,11 +454,10 @@ should_confirm_message(_Delivery, _State) -> needs_confirming({eventually, _, _, _}) -> true; needs_confirming(_) -> false. 
-record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, - State = #q{msg_id_to_channel = MTC}) -> +maybe_record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, + State = #q{msg_id_to_channel = MTC}) -> State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; -record_confirm_message(Confirm, State) - when Confirm =:= immediately orelse Confirm =:= never -> +maybe_record_confirm_message(_Confirm, State) -> State. run_message_queue(State) -> @@ -473,9 +472,9 @@ run_message_queue(State) -> attempt_delivery(#delivery{txn = none, sender = ChPid, message = Message, - msg_seq_no = MsgSeqNo}, - Confirm, + msg_seq_no = MsgSeqNo} = Delivery, State = #q{backing_queue = BQ}) -> + Confirm = should_confirm_message(Delivery, State), case Confirm of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok @@ -500,28 +499,26 @@ attempt_delivery(#delivery{txn = none, {Delivered, Confirm, State1}; attempt_delivery(#delivery{txn = Txn, sender = ChPid, - message = Message}, - Confirm, + message = Message} = Delivery, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), - {true, Confirm, State#q{backing_queue_state = BQS1}}. - -deliver_or_enqueue(Delivery, State) -> - case attempt_delivery(Delivery, - should_confirm_message(Delivery, State), State) of - {true, Confirm, State1} -> - record_confirm_message(Confirm, State1); - {false, Confirm, State1 = #q{backing_queue = BQ, - backing_queue_state = BQS}} -> - #delivery{message = Message} = Delivery, - BQS1 = BQ:publish(Message, - (message_properties(State)) #message_properties{ - needs_confirming = needs_confirming(Confirm)}, - BQS), - State2 = record_confirm_message(Confirm, State1), - ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) + {true, should_confirm_message(Delivery, State), + State#q{backing_queue_state = BQS1}}. + +deliver_or_enqueue(Delivery = #delivery{message = Message}, State) -> + {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), + State2 = #q{backing_queue = BQ, backing_queue_state = BQS} = + maybe_record_confirm_message(Confirm, State1), + case Delivered of + true -> State2; + false -> BQS1 = + BQ:publish(Message, + (message_properties(State)) #message_properties{ + needs_confirming = needs_confirming(Confirm)}, + BQS), + ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -840,15 +837,8 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Delivered, Confirm, State1} = - attempt_delivery(Delivery, - should_confirm_message(Delivery, State), - State), - State2 = case Delivered andalso needs_confirming(Confirm) of - true -> record_confirm_message(Confirm, State); - false -> State1 - end, - reply(Delivered, State2); + {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), + reply(Delivered, maybe_record_confirm_message(Confirm, State1)); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 046d25da345ae888beaf9cc1f4125e596bc5eac1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:37:21 +0000 Subject: How could I forget those joyous uses of channel in tests? 
--- src/rabbit_tests.erl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 930923e8..b8c3f4a9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1121,8 +1121,9 @@ test_server_status() -> %% create a few things so there is some useful information to list Writer = spawn(fun () -> receive shutdown -> ok end end), {ok, Ch} = rabbit_channel:start_link( - 1, self(), Writer, rabbit_framing_amqp_0_9_1, user(<<"user">>), - <<"/">>, [], self(), fun (_) -> {ok, self()} end), + 1, self(), Writer, self(), rabbit_framing_amqp_0_9_1, + user(<<"user">>), <<"/">>, [], self(), + fun (_) -> {ok, self()} end), [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], {new, Queue = #amqqueue{}} <- [rabbit_amqqueue:declare( @@ -1181,8 +1182,9 @@ test_spawn(Receiver) -> Me = self(), Writer = spawn(fun () -> Receiver(Me) end), {ok, Ch} = rabbit_channel:start_link( - 1, Me, Writer, rabbit_framing_amqp_0_9_1, user(<<"guest">>), - <<"/">>, [], self(), fun (_) -> {ok, self()} end), + 1, Me, Writer, Me, rabbit_framing_amqp_0_9_1, + user(<<"guest">>), <<"/">>, [], self(), + fun (_) -> {ok, self()} end), ok = rabbit_channel:do(Ch, #'channel.open'{}), receive #'channel.open_ok'{} -> ok after 1000 -> throw(failed_to_receive_channel_open_ok) -- cgit v1.2.1 From b87031eaa490a155e8737c0904b2ce9c62542cb8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 14:29:58 +0000 Subject: Say "ConnPid" everywhere. --- src/rabbit_channel.erl | 64 +++++++++++++++++++++++----------------------- src/rabbit_channel_sup.erl | 10 ++++---- src/rabbit_direct.erl | 6 ++--- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 370654a9..2d2d9d60 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -29,7 +29,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2]). --record(ch, {state, protocol, channel, reader_pid, writer_pid, connection_pid, +-record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, limiter_pid, start_limiter_fun, transaction_id, tx_participants, next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, @@ -96,10 +96,10 @@ %%---------------------------------------------------------------------------- -start_link(Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, +start_link(Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun) -> gen_server2:start_link( - ?MODULE, [Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, + ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun], []). 
do(Pid, Method) -> @@ -154,7 +154,7 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, +init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), @@ -164,7 +164,7 @@ init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, channel = Channel, reader_pid = ReaderPid, writer_pid = WriterPid, - connection_pid = ConnectionPid, + conn_pid = ConnPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, transaction_id = none, @@ -362,15 +362,15 @@ return_ok(State, false, Msg) -> {reply, Msg, State}. ok_msg(true, _Msg) -> undefined; ok_msg(false, Msg) -> Msg. -send_exception(Reason, State = #ch{protocol = Protocol, - channel = Channel, - writer_pid = WriterPid, - reader_pid = ReaderPid, - connection_pid = ConnectionPid}) -> +send_exception(Reason, State = #ch{protocol = Protocol, + channel = Channel, + writer_pid = WriterPid, + reader_pid = ReaderPid, + conn_pid = ConnPid}) -> {CloseChannel, CloseMethod} = rabbit_binary_generator:map_exception(Channel, Reason, Protocol), rabbit_log:error("connection ~p, channel ~p - error:~n~p~n", - [ConnectionPid, Channel, Reason]), + [ConnPid, Channel, Reason]), %% something bad's happened: rollback_and_notify may not be 'ok' {_Result, State1} = rollback_and_notify(State), case CloseChannel of @@ -652,12 +652,12 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, _, State = #ch{writer_pid = WriterPid, - connection_pid = ConnectionPid, - next_tag = DeliveryTag}) -> + conn_pid = ConnPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:basic_get(Q, self(), NoAck) end) of {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, @@ -691,7 +691,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid, + _, State = #ch{conn_pid = ConnPid, limiter_pid = LimiterPid, consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of @@ -708,7 +708,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, %% behalf. This is for symmetry with basic.cancel - see %% the comment in that method for why. 
case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> {rabbit_amqqueue:basic_consume( Q, NoAck, self(), LimiterPid, @@ -923,10 +923,10 @@ handle_method(#'queue.declare'{queue = QueueNameBin, nowait = NoWait, arguments = Args} = Declare, _, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid, + conn_pid = ConnPid, queue_collector_pid = CollectorPid}) -> Owner = case ExclusiveDeclare of - true -> ConnectionPid; + true -> ConnPid; false -> none end, ActualNameBin = case QueueNameBin of @@ -969,13 +969,13 @@ handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, _, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid}) -> + conn_pid = ConnPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = rabbit_amqqueue:with_or_die( QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), - ok = rabbit_amqqueue:check_exclusive_access(Q, ConnectionPid), + ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid), return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount, State); @@ -983,11 +983,11 @@ handle_method(#'queue.delete'{queue = QueueNameBin, if_unused = IfUnused, if_empty = IfEmpty, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid}) -> + _, State = #ch{conn_pid = ConnPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_configure_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of {error, in_use} -> rabbit_misc:protocol_error( @@ -1019,11 +1019,11 @@ handle_method(#'queue.unbind'{queue = QueueNameBin, handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid}) -> + _, State = #ch{conn_pid = ConnPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:purge(Q) end), return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); @@ -1144,7 +1144,7 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, - connection_pid = ConnPid }) -> + conn_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1412,13 +1412,13 @@ coalesce_and_send(MsgSeqNos, MkMsgFun, infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. 
-i(pid, _) -> self(); -i(connection, #ch{connection_pid = Connection}) -> Connection; -i(number, #ch{channel = Channel}) -> Channel; -i(user, #ch{user = User}) -> User#user.username; -i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; -i(confirm, #ch{confirm_enabled = CE}) -> CE; +i(pid, _) -> self(); +i(connection, #ch{conn_pid = ConnPid}) -> ConnPid; +i(number, #ch{channel = Channel}) -> Channel; +i(user, #ch{user = User}) -> User#user.username; +i(vhost, #ch{virtual_host = VHost}) -> VHost; +i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; +i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 7eec0818..65ccca02 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -64,16 +64,16 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, User, VHost, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Channel, ClientChannelPid, ConnectionPid, Protocol, User, - VHost, Capabilities, Collector}) -> +start_link({direct, Channel, ClientChannelPid, ConnPid, Protocol, User, VHost, + Capabilities, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, - ConnectionPid, Protocol, User, VHost, Capabilities, - Collector, start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, ConnPid, + Protocol, User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 568cbea3..0810c762 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -69,11 +69,11 @@ connect(Username, Password, VHost, Protocol) -> {error, broker_not_found_on_node} end. -start_channel(Number, ClientChannelPid, ConnectionPid, Protocol, User, VHost, +start_channel(Number, ClientChannelPid, ConnPid, Protocol, User, VHost, Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, ConnectionPid, Protocol, User, - VHost, Capabilities, Collector}]), + [{direct, Number, ClientChannelPid, ConnPid, Protocol, User, VHost, + Capabilities, Collector}]), {ok, ChannelPid}. 
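Illustrative sketch (not from the patches above): with the connection pid now
threaded through rabbit_channel:start_link/10 and rabbit_direct:start_channel/8,
callers supply the connection process explicitly. The helper below mirrors the
test_spawn/1 change earlier in this series; the throwaway Writer process and the
start_sketch_channel name are assumptions for illustration only, and User is a
#user{} record as in the tests.

%% For a network connection ReaderPid and ConnPid are the same reader process;
%% for a direct connection ConnPid is the direct-connection process rather
%% than the client channel pid.
start_sketch_channel(User, VHost) ->
    Me = self(),
    Writer = spawn(fun () -> receive shutdown -> ok end end),
    {ok, Ch} = rabbit_channel:start_link(
                 1, Me, Writer, Me, rabbit_framing_amqp_0_9_1,
                 User, VHost, [], Me, fun (_) -> {ok, self()} end),
    Ch.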
-- cgit v1.2.1 From f4e4bdc1bfefd70bf3d11b40ecda8f67727d2424 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 15:16:49 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 2d2d9d60..0c12614c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -651,9 +651,9 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{writer_pid = WriterPid, - conn_pid = ConnPid, - next_tag = DeliveryTag}) -> + _, State = #ch{writer_pid = WriterPid, + conn_pid = ConnPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( @@ -968,8 +968,8 @@ handle_method(#'queue.declare'{queue = QueueNameBin, handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, - _, State = #ch{virtual_host = VHostPath, - conn_pid = ConnPid}) -> + _, State = #ch{virtual_host = VHostPath, + conn_pid = ConnPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = @@ -1143,8 +1143,8 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, - State = #ch{virtual_host = VHostPath, - conn_pid = ConnPid }) -> + State = #ch{virtual_host = VHostPath, + conn_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - -- cgit v1.2.1 From ae2e8ee3a60753439654ea6feef90ca7df3a3096 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 17:30:30 +0000 Subject: Abstract and rewrite schema_version handling functions --- src/rabbit_mnesia.erl | 18 +++++---- src/rabbit_upgrade.erl | 96 ++++++++++++++------------------------------- src/rabbit_version.erl | 103 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+), 74 deletions(-) create mode 100644 src/rabbit_version.erl diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e61f5fce..fa442c9c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -442,7 +442,7 @@ init_db(ClusterNodes, Force) -> {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + rpc:call(AnotherNode, rabbit_version, read, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -457,7 +457,8 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_upgrade:write_version() + ok = rabbit_version:write( + rabbit_upgrade:desired_version()) end, ensure_schema_integrity() end; @@ -484,13 +485,14 @@ schema_ok_or_move() -> end. 
ensure_version_ok({ok, DiscVersion}) -> - case rabbit_upgrade:desired_version() of - DiscVersion -> ok; - DesiredVersion -> throw({error, {schema_mismatch, - DesiredVersion, DiscVersion}}) + DesiredVersion = rabbit_upgrade:desired_version(), + case rabbit_version:'=~='(DesiredVersion, DiscVersion) of + true -> ok; + false -> throw({error, {schema_mismatch, + DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_upgrade:write_version(). + ok = rabbit_version:write(rabbit_upgrade:desired_version()). create_schema() -> mnesia:stop(), @@ -500,7 +502,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_upgrade:write_version(). + ok = rabbit_version:write(rabbit_upgrade:desired_version()). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f1134cfa..7a4a4fd8 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -17,8 +17,7 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). --export([read_version/0, write_version/0, desired_version/0, - desired_version/1]). +-export([desired_version/0]). -include("rabbit.hrl"). @@ -30,16 +29,9 @@ -ifdef(use_specs). --type(step() :: atom()). --type(version() :: [{scope(), [step()]}]). --type(scope() :: 'mnesia' | 'local'). - -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec(write_version/0 :: () -> 'ok'). --spec(desired_version/0 :: () -> version()). --spec(desired_version/1 :: (scope()) -> [step()]). +-spec(desired_version/0 :: () -> rabbit_version:version()). -endif. @@ -173,7 +165,7 @@ is_disc_node() -> %% This is pretty ugly but we can't start Mnesia and ask it (will hang), %% we can't look at the config file (may not include us even if we're a %% disc node). - filelib:is_regular(rabbit_mnesia:dir() ++ "/rabbit_durable_exchange.DCD"). + filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). die(Msg, Args) -> %% We don't throw or exit here since that gets thrown @@ -216,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = write_version(mnesia), + ok = write_desired_scope_version(mnesia), ok. nodes_running(Nodes) -> @@ -238,63 +230,37 @@ maybe_upgrade_local() -> fun() -> ok end) end. -read_version() -> - case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, V}; - {error, _} = Err -> Err - end. - -read_version(Scope) -> - case read_version() of - {error, _} = E -> E; - {ok, V} -> {ok, filter_by_scope(Scope, V)} - end. - -write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), - ok. - -write_version(Scope) -> - {ok, V0} = read_version(), - V = flatten([case S of - Scope -> desired_version(S); - _ -> filter_by_scope(S, V0) - end || S <- ?SCOPES]), - ok = rabbit_misc:write_term_file(schema_filename(), [V]), - ok. - -desired_version() -> - flatten([desired_version(Scope) || Scope <- ?SCOPES]). +desired_version() -> [{Scope, desired_version(Scope)} || Scope <- ?SCOPES]. -desired_version(Scope) -> - with_upgrade_graph(fun (G) -> heads(G) end, Scope). +desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). -flatten(LoL) -> - lists:sort(lists:append(LoL)). 
- -filter_by_scope(Scope, Versions) -> - with_upgrade_graph( - fun(G) -> - ScopeVs = digraph:vertices(G), - [V || V <- Versions, lists:member(V, ScopeVs)] - end, Scope). +write_desired_scope_version(Scope) -> + ok = rabbit_version:with_scope_version( + Scope, + fun ({error, Error}) -> + throw({error, {can_not_read_version_to_write_it, Error}}) + end, + fun (_SV) -> {desired_version(Scope), ok} end). %% ------------------------------------------------------------------- upgrades_required(Scope) -> - case read_version(Scope) of - {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> upgrades_to_apply(CurrentHeads, G); - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end, Scope); - {error, enoent} -> - version_not_available - end. + rabbit_version:with_scope_version( + Scope, + fun ({error, enoent}) -> version_not_available end, + fun (CurrentHeads) -> + {CurrentHeads, + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> + upgrades_to_apply(CurrentHeads, G); + Unknown -> + throw({error, + {future_upgrades_found, Unknown}}) + end + end, Scope)} + end). with_upgrade_graph(Fun, Scope) -> case rabbit_misc:build_acyclic_graph( @@ -363,7 +329,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = write_version(Scope), + ok = write_desired_scope_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); @@ -386,8 +352,6 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). -schema_filename() -> filename:join(dir(), ?VERSION_FILENAME). - lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl new file mode 100644 index 00000000..c88d57fe --- /dev/null +++ b/src/rabbit_version.erl @@ -0,0 +1,103 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_version). + +-export([read/0, write/1, with_scope_version/3, '=~='/2]). + +%% ------------------------------------------------------------------- +-ifdef(use_specs). + +-export_type([step/0, version/0, scope/0]). + +-type(step() :: atom()). +-type(version() :: [{scope(), [step()]}]). +-type(scope() :: atom()). + +-spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). +-spec(write/1 :: (version()) -> 'ok'). +-spec(with_scope_version/3 :: + (scope(), + fun (({'error', any()}) -> E), + fun (([step()]) -> {[step()], A})) -> E | A). +-spec('=~='/2 :: (version(), version()) -> boolean()). + +-endif. +%% ------------------------------------------------------------------- + +-define(VERSION_FILENAME, "schema_version"). 
+ +%% ------------------------------------------------------------------- + +read() -> + case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, categorise_by_scope(V)}; + {error, _} = Err -> Err + end. + +write(Version) -> + V = [Name || {_Scope, Names} <- Version, Name <- Names], + ok = rabbit_misc:write_term_file(schema_filename(), [V]). + +with_scope_version(Scope, ErrorHandler, Fun) -> + case read() of + {error, _} = Err -> + ErrorHandler(Err); + {ok, Version} -> + SV = case lists:keysearch(Scope, 1, Version) of + false -> []; + {value, {Scope, SV1}} -> SV1 + end, + {SV2, Result} = Fun(SV), + ok = case SV =:= SV2 of + true -> ok; + false -> write(lists:keystore(Scope, 1, Version, + {Scope, SV2})) + end, + Result + end. + +'=~='(VerA, VerB) -> + matches(lists:usort(VerA), lists:usort(VerB)). + +%% ------------------------------------------------------------------- + +matches([], []) -> + true; +matches([{Scope, SV}|VerA], [{Scope, SV}|VerB]) -> + matches(VerA, VerB); +matches([{Scope, SVA}|VerA], [{Scope, SVB}|VerB]) -> + case {lists:usort(SVA), lists:usort(SVB)} of + {SV, SV} -> matches(VerA, VerB); + _ -> false + end; +matches(_VerA, _VerB) -> + false. + +categorise_by_scope(Heads) when is_list(Heads) -> + Categorised = + [{Scope, Name} || {_Module, Attributes} <- + rabbit_misc:all_module_attributes(rabbit_upgrade), + {Name, Scope, _Requires} <- Attributes, + lists:member(Name, Heads)], + orddict:to_list( + lists:foldl(fun ({Scope, Name}, Version) -> + rabbit_misc:orddict_cons(Scope, Name, Version) + end, orddict:new(), Categorised)). + +dir() -> rabbit_mnesia:dir(). + +schema_filename() -> filename:join(dir(), ?VERSION_FILENAME). -- cgit v1.2.1 From 7f05a48a9e4ebbd62d41e3be3d514f689318abeb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 18:02:15 +0000 Subject: english --- src/rabbit_upgrade.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 7a4a4fd8..9b2ffa28 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -238,7 +238,7 @@ write_desired_scope_version(Scope) -> ok = rabbit_version:with_scope_version( Scope, fun ({error, Error}) -> - throw({error, {can_not_read_version_to_write_it, Error}}) + throw({error, {cannot_read_version_to_write_it, Error}}) end, fun (_SV) -> {desired_version(Scope), ok} end). -- cgit v1.2.1 From f10c0b62e57dd2e5d0f2dd877e03dfd699298cc9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 18:13:19 +0000 Subject: ordering --- src/rabbit_version.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index c88d57fe..8c577f9c 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -23,9 +23,9 @@ -export_type([step/0, version/0, scope/0]). +-type(scope() :: atom()). -type(step() :: atom()). -type(version() :: [{scope(), [step()]}]). --type(scope() :: atom()). -spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write/1 :: (version()) -> 'ok'). -- cgit v1.2.1 From 221433535cd1551a83132d0a8d46440dd12ea433 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 00:52:11 +0000 Subject: incorporate qa feedback. The version.erl api is rather nice now: the version itself is entirely opaque - whilst it can be read, there's nothing provided to decompose it at all. 
--- src/rabbit_mnesia.erl | 9 ++- src/rabbit_upgrade.erl | 116 +++++++---------------------------- src/rabbit_version.erl | 160 +++++++++++++++++++++++++++++++++++-------------- 3 files changed, 139 insertions(+), 146 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index fa442c9c..4902cfeb 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -457,8 +457,7 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_version:write( - rabbit_upgrade:desired_version()) + ok = rabbit_version:write_desired_version() end, ensure_schema_integrity() end; @@ -485,14 +484,14 @@ schema_ok_or_move() -> end. ensure_version_ok({ok, DiscVersion}) -> - DesiredVersion = rabbit_upgrade:desired_version(), + DesiredVersion = rabbit_version:desired_version(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {schema_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_version:write(rabbit_upgrade:desired_version()). + ok = rabbit_version:write_desired_version(). create_schema() -> mnesia:stop(), @@ -502,7 +501,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_version:write(rabbit_upgrade:desired_version()). + ok = rabbit_version:write_desired_version(). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9b2ffa28..9347cc53 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -17,13 +17,11 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). --export([desired_version/0]). -include("rabbit.hrl"). -define(VERSION_FILENAME, "schema_version"). -define(LOCK_FILENAME, "schema_upgrade_lock"). --define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- @@ -31,7 +29,6 @@ -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(desired_version/0 :: () -> rabbit_version:version()). -endif. @@ -96,8 +93,8 @@ maybe_upgrade_mnesia() -> AllNodes = rabbit_mnesia:all_clustered_nodes(), - case upgrades_required(mnesia) of - version_not_available -> + case rabbit_version:upgrades_required(mnesia) of + {error, version_not_available} -> rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; @@ -105,9 +102,11 @@ maybe_upgrade_mnesia() -> "< 2.1.1.~nUnfortunately you will need to " "rebuild the cluster.", []) end; - [] -> + {error, _} = Err -> + throw(Err); + {ok, []} -> ok; - Upgrades -> + {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); @@ -142,18 +141,19 @@ upgrade_mode(AllNodes) -> end; [Another|_] -> ClusterVersion = - case rpc:call(Another, - rabbit_upgrade, desired_version, [mnesia]) of + case rpc:call(Another, rabbit_version, desired_scope_version, + [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; {badrpc, Reason} -> {unknown, Reason}; V -> V end, - case desired_version(mnesia) of - ClusterVersion -> + MyVersion = rabbit_version:desired_scope_version(mnesia), + case rabbit_version:'=~='(ClusterVersion, MyVersion) of + true -> %% The other node(s) have upgraded already, I am not the %% upgrader secondary; - MyVersion -> + false -> %% The other node(s) are running an unexpected version. 
die("Cluster upgrade needed but other nodes are " "running ~p~nand I want ~p", @@ -208,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = write_desired_scope_version(mnesia), + ok = rabbit_version:write_desired_scope_version(mnesia), ok. nodes_running(Nodes) -> @@ -223,90 +223,14 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - case upgrades_required(local) of - version_not_available -> version_not_available; - [] -> ok; - Upgrades -> apply_upgrades(local, Upgrades, - fun() -> ok end) + case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> ok; + {ok, Upgrades} -> apply_upgrades(local, Upgrades, + fun () -> ok end) end. -desired_version() -> [{Scope, desired_version(Scope)} || Scope <- ?SCOPES]. - -desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). - -write_desired_scope_version(Scope) -> - ok = rabbit_version:with_scope_version( - Scope, - fun ({error, Error}) -> - throw({error, {cannot_read_version_to_write_it, Error}}) - end, - fun (_SV) -> {desired_version(Scope), ok} end). - -%% ------------------------------------------------------------------- - -upgrades_required(Scope) -> - rabbit_version:with_scope_version( - Scope, - fun ({error, enoent}) -> version_not_available end, - fun (CurrentHeads) -> - {CurrentHeads, - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> - upgrades_to_apply(CurrentHeads, G); - Unknown -> - throw({error, - {future_upgrades_found, Unknown}}) - end - end, Scope)} - end). - -with_upgrade_graph(Fun, Scope) -> - case rabbit_misc:build_acyclic_graph( - fun (Module, Steps) -> vertices(Module, Steps, Scope) end, - fun (Module, Steps) -> edges(Module, Steps, Scope) end, - rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> try - Fun(G) - after - true = digraph:delete(G) - end; - {error, {vertex, duplicate, StepName}} -> - throw({error, {duplicate_upgrade_step, StepName}}); - {error, {edge, {bad_vertex, StepName}, _From, _To}} -> - throw({error, {dependency_on_unknown_upgrade_step, StepName}}); - {error, {edge, {bad_edge, StepNames}, _From, _To}} -> - throw({error, {cycle_in_upgrade_steps, StepNames}}) - end. - -vertices(Module, Steps, Scope0) -> - [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, - Scope0 == Scope1]. - -edges(_Module, Steps, Scope0) -> - [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, - Require <- Requires, - Scope0 == Scope1]. -unknown_heads(Heads, G) -> - [H || H <- Heads, digraph:vertex(G, H) =:= false]. - -upgrades_to_apply(Heads, G) -> - %% Take all the vertices which can reach the known heads. That's - %% everything we've already applied. Subtract that from all - %% vertices: that's what we have to apply. - Unsorted = sets:to_list( - sets:subtract( - sets:from_list(digraph:vertices(G)), - sets:from_list(digraph_utils:reaching(Heads, G)))), - %% Form a subgraph from that list and find a topological ordering - %% so we can invoke them in order. - [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. - -heads(G) -> - lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). 
- %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> @@ -329,7 +253,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = write_desired_scope_version(Scope), + ok = rabbit_version:write_desired_scope_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index 8c577f9c..2d7ba8e4 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,86 +16,156 @@ -module(rabbit_version). --export([read/0, write/1, with_scope_version/3, '=~='/2]). +-export([read/0, '=~='/2, desired_version/0, desired_scope_version/1, + write_desired_version/0, write_desired_scope_version/1, + upgrades_required/1]). %% ------------------------------------------------------------------- -ifdef(use_specs). --export_type([step/0, version/0, scope/0]). +-export_type([scope/0, step/0, scope_version/0]). -type(scope() :: atom()). --type(step() :: atom()). --type(version() :: [{scope(), [step()]}]). +-type(scope_version() :: [atom()]). +-type(step() :: {atom(), atom()}). + +-type(version() :: [atom()]). -spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec(write/1 :: (version()) -> 'ok'). --spec(with_scope_version/3 :: - (scope(), - fun (({'error', any()}) -> E), - fun (([step()]) -> {[step()], A})) -> E | A). -spec('=~='/2 :: (version(), version()) -> boolean()). +-spec(desired_version/0 :: () -> version()). +-spec(desired_scope_version/1 :: (scope()) -> scope_version()). +-spec(write_desired_version/0 :: () -> 'ok'). +-spec(write_desired_scope_version/1 :: + (scope()) -> rabbit_types:ok_or_error(any())). +-spec(upgrades_required/1 :: + (scope()) -> rabbit_types:ok_or_error2([step()], any())). -endif. %% ------------------------------------------------------------------- -define(VERSION_FILENAME, "schema_version"). +-define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- -read() -> - case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, categorise_by_scope(V)}; - {error, _} = Err -> Err - end. +read() -> case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, V}; + {error, _} = Err -> Err + end. -write(Version) -> - V = [Name || {_Scope, Names} <- Version, Name <- Names], - ok = rabbit_misc:write_term_file(schema_filename(), [V]). +write(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). -with_scope_version(Scope, ErrorHandler, Fun) -> +read_scope_version(Scope) -> case read() of {error, _} = Err -> - ErrorHandler(Err); + Err; {ok, Version} -> - SV = case lists:keysearch(Scope, 1, Version) of + {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of false -> []; {value, {Scope, SV1}} -> SV1 - end, - {SV2, Result} = Fun(SV), - ok = case SV =:= SV2 of - true -> ok; - false -> write(lists:keystore(Scope, 1, Version, - {Scope, SV2})) - end, - Result + end} end. +write_scope_version(Scope, ScopeVersion) -> + case read() of + {error, _} = Err -> + Err; + {ok, Version} -> + Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version), + {Scope, ScopeVersion}), + ok = write([Name || {_Scope, Names} <- Version1, Name <- Names]) + end. 
+ +%% ------------------------------------------------------------------- + '=~='(VerA, VerB) -> - matches(lists:usort(VerA), lists:usort(VerB)). + lists:usort(VerA) =:= lists:usort(VerB). + +%% ------------------------------------------------------------------- + +desired_version() -> + [Name || Scope <- ?SCOPES, Name <- desired_scope_version(Scope)]. + +desired_scope_version(Scope) -> with_upgrade_graph(fun heads/1, Scope). + +write_desired_version() -> write(desired_version()). + +write_desired_scope_version(Scope) -> + write_scope_version(Scope, desired_scope_version(Scope)). + +upgrades_required(Scope) -> + case read_scope_version(Scope) of + {error, enoent} -> + {error, version_not_available}; + {ok, CurrentHeads} -> + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> {ok, upgrades_to_apply(CurrentHeads, G)}; + Unknown -> {error, {future_upgrades_found, Unknown}} + end + end, Scope) + end. + +%% ------------------------------------------------------------------- + +with_upgrade_graph(Fun, Scope) -> + case rabbit_misc:build_acyclic_graph( + fun (Module, Steps) -> vertices(Module, Steps, Scope) end, + fun (Module, Steps) -> edges(Module, Steps, Scope) end, + rabbit_misc:all_module_attributes(rabbit_upgrade)) of + {ok, G} -> try + Fun(G) + after + true = digraph:delete(G) + end; + {error, {vertex, duplicate, StepName}} -> + throw({error, {duplicate_upgrade_step, StepName}}); + {error, {edge, {bad_vertex, StepName}, _From, _To}} -> + throw({error, {dependency_on_unknown_upgrade_step, StepName}}); + {error, {edge, {bad_edge, StepNames}, _From, _To}} -> + throw({error, {cycle_in_upgrade_steps, StepNames}}) + end. + +vertices(Module, Steps, Scope0) -> + [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, + Scope0 == Scope1]. + +edges(_Module, Steps, Scope0) -> + [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, + Require <- Requires, + Scope0 == Scope1]. +unknown_heads(Heads, G) -> + [H || H <- Heads, digraph:vertex(G, H) =:= false]. + +upgrades_to_apply(Heads, G) -> + %% Take all the vertices which can reach the known heads. That's + %% everything we've already applied. Subtract that from all + %% vertices: that's what we have to apply. + Unsorted = sets:to_list( + sets:subtract( + sets:from_list(digraph:vertices(G)), + sets:from_list(digraph_utils:reaching(Heads, G)))), + %% Form a subgraph from that list and find a topological ordering + %% so we can invoke them in order. + [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. + +heads(G) -> + lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). %% ------------------------------------------------------------------- -matches([], []) -> - true; -matches([{Scope, SV}|VerA], [{Scope, SV}|VerB]) -> - matches(VerA, VerB); -matches([{Scope, SVA}|VerA], [{Scope, SVB}|VerB]) -> - case {lists:usort(SVA), lists:usort(SVB)} of - {SV, SV} -> matches(VerA, VerB); - _ -> false - end; -matches(_VerA, _VerB) -> - false. 
- -categorise_by_scope(Heads) when is_list(Heads) -> +categorise_by_scope(Version) when is_list(Version) -> Categorised = [{Scope, Name} || {_Module, Attributes} <- rabbit_misc:all_module_attributes(rabbit_upgrade), {Name, Scope, _Requires} <- Attributes, - lists:member(Name, Heads)], + lists:member(Name, Version)], orddict:to_list( - lists:foldl(fun ({Scope, Name}, Version) -> - rabbit_misc:orddict_cons(Scope, Name, Version) + lists:foldl(fun ({Scope, Name}, CatVersion) -> + rabbit_misc:orddict_cons(Scope, Name, CatVersion) end, orddict:new(), Categorised)). dir() -> rabbit_mnesia:dir(). -- cgit v1.2.1 From 5a390fde517e6f8539f75199b357d064d8c11541 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:17:27 +0000 Subject: renamings --- src/rabbit_mnesia.erl | 10 +++++----- src/rabbit_upgrade.erl | 8 ++++---- src/rabbit_version.erl | 51 +++++++++++++++++++++++++------------------------- 3 files changed, 34 insertions(+), 35 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4902cfeb..c598fbb9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -442,7 +442,7 @@ init_db(ClusterNodes, Force) -> {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( - rpc:call(AnotherNode, rabbit_version, read, [])), + rpc:call(AnotherNode, rabbit_version, recorded, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -457,7 +457,7 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_version:write_desired_version() + ok = rabbit_version:record_desired() end, ensure_schema_integrity() end; @@ -484,14 +484,14 @@ schema_ok_or_move() -> end. ensure_version_ok({ok, DiscVersion}) -> - DesiredVersion = rabbit_version:desired_version(), + DesiredVersion = rabbit_version:desired(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {schema_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_version:write_desired_version(). + ok = rabbit_version:record_desired(). create_schema() -> mnesia:stop(), @@ -501,7 +501,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_version:write_desired_version(). + ok = rabbit_version:record_desired(). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9347cc53..b4e1191e 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -141,13 +141,13 @@ upgrade_mode(AllNodes) -> end; [Another|_] -> ClusterVersion = - case rpc:call(Another, rabbit_version, desired_scope_version, + case rpc:call(Another, rabbit_version, desired_for_scope, [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; {badrpc, Reason} -> {unknown, Reason}; V -> V end, - MyVersion = rabbit_version:desired_scope_version(mnesia), + MyVersion = rabbit_version:desired_for_scope(mnesia), case rabbit_version:'=~='(ClusterVersion, MyVersion) of true -> %% The other node(s) have upgraded already, I am not the @@ -208,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = rabbit_version:write_desired_scope_version(mnesia), + ok = rabbit_version:record_desired_for_scope(mnesia), ok. 
nodes_running(Nodes) -> @@ -253,7 +253,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = rabbit_version:write_desired_scope_version(Scope), + ok = rabbit_version:record_desired_for_scope(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index 2d7ba8e4..e079df4a 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,14 +16,14 @@ -module(rabbit_version). --export([read/0, '=~='/2, desired_version/0, desired_scope_version/1, - write_desired_version/0, write_desired_scope_version/1, +-export([recorded/0, '=~='/2, desired/0, desired_for_scope/1, + record_desired/0, record_desired_for_scope/1, upgrades_required/1]). %% ------------------------------------------------------------------- -ifdef(use_specs). --export_type([scope/0, step/0, scope_version/0]). +-export_type([scope/0, step/0]). -type(scope() :: atom()). -type(scope_version() :: [atom()]). @@ -31,12 +31,12 @@ -type(version() :: [atom()]). --spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec('=~='/2 :: (version(), version()) -> boolean()). --spec(desired_version/0 :: () -> version()). --spec(desired_scope_version/1 :: (scope()) -> scope_version()). --spec(write_desired_version/0 :: () -> 'ok'). --spec(write_desired_scope_version/1 :: +-spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())). +-spec('=~='/2 :: ([A], [A]) -> boolean()). +-spec(desired/0 :: () -> version()). +-spec(desired_for_scope/1 :: (scope()) -> scope_version()). +-spec(record_desired/0 :: () -> 'ok'). +-spec(record_desired_for_scope/1 :: (scope()) -> rabbit_types:ok_or_error(any())). -spec(upgrades_required/1 :: (scope()) -> rabbit_types:ok_or_error2([step()], any())). @@ -49,15 +49,15 @@ %% ------------------------------------------------------------------- -read() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, V}; - {error, _} = Err -> Err - end. +recorded() -> case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, V}; + {error, _} = Err -> Err + end. -write(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). +record(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). -read_scope_version(Scope) -> - case read() of +recorded_for_scope(Scope) -> + case recorded() of {error, _} = Err -> Err; {ok, Version} -> @@ -67,14 +67,14 @@ read_scope_version(Scope) -> end} end. -write_scope_version(Scope, ScopeVersion) -> - case read() of +record_for_scope(Scope, ScopeVersion) -> + case recorded() of {error, _} = Err -> Err; {ok, Version} -> Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version), {Scope, ScopeVersion}), - ok = write([Name || {_Scope, Names} <- Version1, Name <- Names]) + ok = record([Name || {_Scope, Names} <- Version1, Name <- Names]) end. %% ------------------------------------------------------------------- @@ -84,18 +84,17 @@ write_scope_version(Scope, ScopeVersion) -> %% ------------------------------------------------------------------- -desired_version() -> - [Name || Scope <- ?SCOPES, Name <- desired_scope_version(Scope)]. +desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)]. -desired_scope_version(Scope) -> with_upgrade_graph(fun heads/1, Scope). +desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope). 
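%% For orientation while reading the hunks around this point, the renaming
%% applied to rabbit_version in this patch (old name -> new name), taken
%% directly from the diff:
%%
%%   read/0                        -> recorded/0
%%   write/1                       -> record/1
%%   read_scope_version/1          -> recorded_for_scope/1
%%   write_scope_version/2         -> record_for_scope/2
%%   desired_version/0             -> desired/0
%%   desired_scope_version/1       -> desired_for_scope/1
%%   write_desired_version/0       -> record_desired/0
%%   write_desired_scope_version/1 -> record_desired_for_scope/1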
-write_desired_version() -> write(desired_version()). +record_desired() -> record(desired()). -write_desired_scope_version(Scope) -> - write_scope_version(Scope, desired_scope_version(Scope)). +record_desired_for_scope(Scope) -> + record_for_scope(Scope, desired_for_scope(Scope)). upgrades_required(Scope) -> - case read_scope_version(Scope) of + case recorded_for_scope(Scope) of {error, enoent} -> {error, version_not_available}; {ok, CurrentHeads} -> -- cgit v1.2.1 From 349c24621ca359b5e6deac9d43ed8cefd0616152 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:22:03 +0000 Subject: If we have version_not_available, then it makes sense to have version_mismatch, not schema_mismatch --- src/rabbit_mnesia.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c598fbb9..867da779 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -487,8 +487,7 @@ ensure_version_ok({ok, DiscVersion}) -> DesiredVersion = rabbit_version:desired(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; - false -> throw({error, {schema_mismatch, - DesiredVersion, DiscVersion}}) + false -> throw({error, {version_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> ok = rabbit_version:record_desired(). -- cgit v1.2.1 From e74f420db40c772e77454dd05f32f7c172a2156a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:34:13 +0000 Subject: and again, _don't_ record confirm on immediate if we don't deliver... --- src/rabbit_amqqueue_process.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 4ebdb7a3..3f5758ce 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -838,7 +838,10 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% queues discarding the message? %% {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), - reply(Delivered, maybe_record_confirm_message(Confirm, State1)); + reply(Delivered, case Delivered of + true -> maybe_record_confirm_message(Confirm, State1); + false -> State1 + end); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 753447e36efb88eb1580a93c5331894d93d1621c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 14:41:03 +0000 Subject: Make sure we record if an exchange is actually deleted... --- src/rabbit_binding.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7ddb7814..1a9cbde1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -335,12 +335,13 @@ maybe_auto_delete(XName, Bindings, Deletions) -> [] -> add_deletion(XName, {undefined, not_deleted, Bindings}, Deletions); [X] -> - add_deletion(XName, {X, not_deleted, Bindings}, - case rabbit_exchange:maybe_auto_delete(X) of - not_deleted -> Deletions; - {deleted, Deletions1} -> combine_deletions( - Deletions, Deletions1) - end) + case rabbit_exchange:maybe_auto_delete(X) of + not_deleted -> + add_deletion(XName, {X, not_deleted, Bindings}, Deletions); + {deleted, Deletions1} -> + add_deletion(XName, {X, deleted, Bindings}, + combine_deletions(Deletions, Deletions1)) + end end. 
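%% Hedged sketch of the data shape being corrected above (values are
%% hypothetical): each deletions entry keyed by exchange name is a
%% {MaybeExchange, WasDeleted, Bindings} triple, and the point of the
%% change is that WasDeleted now reflects what
%% rabbit_exchange:maybe_auto_delete/1 actually did:
%%
%%   {undefined, not_deleted, Bindings}  %% exchange no longer in mnesia
%%   {X,         not_deleted, Bindings}  %% exchange present, kept
%%   {X,         deleted,     Bindings}  %% exchange present, auto-deleted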
delete_forward_routes(Route) -> -- cgit v1.2.1 From 8449616cc83d64477ad4fc69921c1e227f7be3a1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 14:53:30 +0000 Subject: Make code prettier --- src/rabbit_binding.erl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1a9cbde1..6167790e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -331,18 +331,18 @@ group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) -> group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed). maybe_auto_delete(XName, Bindings, Deletions) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> - add_deletion(XName, {undefined, not_deleted, Bindings}, Deletions); - [X] -> - case rabbit_exchange:maybe_auto_delete(X) of - not_deleted -> - add_deletion(XName, {X, not_deleted, Bindings}, Deletions); - {deleted, Deletions1} -> - add_deletion(XName, {X, deleted, Bindings}, - combine_deletions(Deletions, Deletions1)) - end - end. + {Entry, Deletions1} = + case mnesia:read({rabbit_exchange, XName}) of + [] -> {{undefined, not_deleted, Bindings}, Deletions}; + [X] -> case rabbit_exchange:maybe_auto_delete(X) of + not_deleted -> + {{X, not_deleted, Bindings}, Deletions}; + {deleted, Deletions2} -> + {{X, deleted, Bindings}, + combine_deletions(Deletions, Deletions2)} + end + end, + add_deletion(XName, Entry, Deletions1). delete_forward_routes(Route) -> ok = mnesia:delete_object(rabbit_route, Route, write), -- cgit v1.2.1 From d350c5ce08dff4f0ff64d5d294c00b0866932121 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:26:25 +0000 Subject: rabbit:stop() is not always called when rabbit is stopping. E.g. q(). doesn't invoke rabbit:stop/0. It does invoke rabbit:stop/1 though. --- src/rabbit.erl | 2 +- src/rabbit_mnesia.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index b1d88a52..5f88b997 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -204,7 +204,6 @@ start() -> end. stop() -> - ok = rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> @@ -246,6 +245,7 @@ start(normal, []) -> end. stop(_State) -> + ok = rabbit_mnesia:record_running_disc_nodes(), terminated_ok = error_logger:delete_report_handler(rabbit_error_logger), ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 867da779..4d3267a2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -378,7 +378,7 @@ delete_cluster_nodes_config() -> end. running_nodes_filename() -> - dir() ++ "/nodes_running_at_shutdown". + filename:join(dir(), "nodes_running_at_shutdown"). record_running_disc_nodes() -> FileName = running_nodes_filename(), -- cgit v1.2.1 From 544081a948a2ecc2e114dfb81aaf268cf10d966d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:37:54 +0000 Subject: Improve symmetry: if we write the running_disc_nodes on rabbit shutdown, we should nuke it on rabbit startup. This then means that the prelaunch thingy is always run with the previously_running_disc_nodes file present. 
I believe this makes no semantic changes, but the improved symmetry is worth having --- src/rabbit.erl | 1 + src/rabbit_upgrade.erl | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 5f88b997..1361d0f4 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -233,6 +233,7 @@ rotate_logs(BinarySuffix) -> start(normal, []) -> case erts_version_check() of ok -> + ok = rabbit_mnesia:delete_previously_running_disc_nodes(), {ok, SupPid} = rabbit_sup:start_link(), true = register(rabbit, self()), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b4e1191e..20f53da2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -112,8 +112,7 @@ maybe_upgrade_mnesia() -> primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) end - end, - ok = rabbit_mnesia:delete_previously_running_disc_nodes(). + end. upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of -- cgit v1.2.1 From b13bd327e6d58bfe4fdeb8f8c14f666f493fbe54 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:53:15 +0000 Subject: Can't call =~= with non-version args, thus shuffle things around a bit. End up saving a line. --- src/rabbit_upgrade.erl | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 20f53da2..875d971a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -139,24 +139,23 @@ upgrade_mode(AllNodes) -> []) end; [Another|_] -> - ClusterVersion = - case rpc:call(Another, rabbit_version, desired_for_scope, - [mnesia]) of - {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; - {badrpc, Reason} -> {unknown, Reason}; - V -> V - end, MyVersion = rabbit_version:desired_for_scope(mnesia), - case rabbit_version:'=~='(ClusterVersion, MyVersion) of - true -> - %% The other node(s) have upgraded already, I am not the - %% upgrader - secondary; - false -> - %% The other node(s) are running an unexpected version. - die("Cluster upgrade needed but other nodes are " - "running ~p~nand I want ~p", - [ClusterVersion, MyVersion]) + ErrFun = fun (ClusterVersion) -> + %% The other node(s) are running an + %% unexpected version. + die("Cluster upgrade needed but other nodes are " + "running ~p~nand I want ~p", + [ClusterVersion, MyVersion]) + end, + case rpc:call(Another, rabbit_version, desired_for_scope, + [mnesia]) of + {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version); + {badrpc, Reason} -> ErrFun({unknown, Reason}); + CV -> case rabbit_version:'=~='( + MyVersion, CV) of + true -> secondary; + false -> ErrFun(CV) + end end end. -- cgit v1.2.1 From 0f23637e28f62110bab3bff3715d5fa8f5dc4c17 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 18:02:50 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a08bbd70..2b162f9d 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1147,7 +1147,7 @@ orddict_store(Key, Val, Dict) -> orddict:store(Key, Val, Dict). 
update_pending_confirms(Fun, CRef, - State = #msstate { clients = Clients, + State = #msstate { clients = Clients, cref_to_msg_ids = CTM }) -> case dict:fetch(CRef, Clients) of {undefined, _CloseFDsFun} -> State; -- cgit v1.2.1 From 3ae4322d27ee90b19d774418c43fd7e8a0b75ac4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 12:15:51 +0000 Subject: Ensure mnesia is stopped for the local upgrade backup. --- src/rabbit_upgrade.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b4e1191e..2c31e602 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -182,7 +182,6 @@ primary_upgrade(Upgrades, Nodes) -> mnesia, Upgrades, fun () -> - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), case Others of [] -> ok; @@ -227,7 +226,8 @@ maybe_upgrade_local() -> {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); {ok, []} -> ok; - {ok, Upgrades} -> apply_upgrades(local, Upgrades, + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, fun () -> ok end) end. @@ -249,6 +249,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> ok = file:delete(lock_filename(BackupDir)), info("~s upgrades: Mnesia dir backed up to ~p~n", [Scope, BackupDir]), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), Fun(), [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", -- cgit v1.2.1 From 82ea108bc5c4f17283f0b0080f7dfcf9baea123d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 13:38:20 +0000 Subject: Take a single backup before any upgrade, remove it when we're all clear. --- src/rabbit_upgrade.erl | 106 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 69 insertions(+), 37 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c061cd49..d56b50b2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,7 +16,8 @@ -module(rabbit_upgrade). --export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). +-export([maybe_backup/0, maybe_upgrade_mnesia/0, maybe_upgrade_local/0, + maybe_remove_backup/0]). -include("rabbit.hrl"). @@ -27,8 +28,10 @@ -ifdef(use_specs). +-spec(maybe_backup/0 :: () -> 'ok'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). +-spec(maybe_remove_backup/0 :: () -> 'ok'). -endif. @@ -91,11 +94,66 @@ %% ------------------------------------------------------------------- +maybe_backup() -> + case backup_required() of + true -> backup(); + _ -> ok + end. + +backup() -> + rabbit:prepare(), %% Ensure we have logs for this + LockFile = lock_filename(dir()), + case rabbit_misc:lock_file(LockFile) of + ok -> + BackupDir = backup_dir(), + case rabbit_mnesia:copy_db(BackupDir) of + ok -> + %% We need to make the backup after creating the + %% lock file so that it protects us from trying to + %% overwrite the backup. Unfortunately this means + %% the lock file exists in the backup too, which + %% is not intuitive. Remove it. + ok = file:delete(lock_filename(BackupDir)), + info("upgrades: Mnesia dir backed up to ~p~n", [BackupDir]); + {error, E} -> + %% If we can't backup, the upgrade hasn't started + %% hence we don't need the lockfile since the real + %% mnesia dir is the good one. + ok = file:delete(LockFile), + throw({could_not_back_up_mnesia_dir, E}) + end; + {error, eexist} -> + throw({error, previous_upgrade_failed}) + end. 
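%% A hedged aside on the locking above: rabbit_misc:lock_file/1 acts
%% purely as an "upgrade in progress" marker, so {error, eexist} on the
%% next boot means a previous upgrade died half-way and the backup is the
%% copy to trust. A rough stdlib-only approximation (hypothetical helper,
%% not the real rabbit_misc implementation):
lock_file_sketch(Path) ->
    case filelib:is_file(Path) of
        true  -> {error, eexist};
        false -> ok = file:write_file(Path, <<>>),
                 ok
    end.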
+ + +maybe_remove_backup() -> + case file:read_file_info(backup_dir()) of + {ok, _} -> remove_backup(); + _ -> ok + end. + +remove_backup() -> + LockFile = lock_filename(dir()), + BackupDir = backup_dir(), + ok = rabbit_misc:recursive_delete([BackupDir]), + info("upgrades: Mnesia backup removed~n", []), + ok = file:delete(LockFile). + +backup_required() -> + case {rabbit_version:upgrades_required(mnesia), + rabbit_version:upgrades_required(local)} of + {{ok, []}, {ok, []}} -> false; + {_, {ok, _}} -> true; + {{ok, _}, _} -> true; + _ -> false + end. + maybe_upgrade_mnesia() -> + maybe_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> - rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " @@ -227,45 +285,18 @@ maybe_upgrade_local() -> {ok, Upgrades} -> mnesia:stop(), apply_upgrades(local, Upgrades, fun () -> ok end) - end. + end, + maybe_remove_backup(). %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> - LockFile = lock_filename(dir()), - case rabbit_misc:lock_file(LockFile) of - ok -> - BackupDir = dir() ++ "-upgrade-backup", - info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), - case rabbit_mnesia:copy_db(BackupDir) of - ok -> - %% We need to make the backup after creating the - %% lock file so that it protects us from trying to - %% overwrite the backup. Unfortunately this means - %% the lock file exists in the backup too, which - %% is not intuitive. Remove it. - ok = file:delete(lock_filename(BackupDir)), - info("~s upgrades: Mnesia dir backed up to ~p~n", - [Scope, BackupDir]), - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - Fun(), - [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], - info("~s upgrades: All upgrades applied successfully~n", - [Scope]), - ok = rabbit_version:record_desired_for_scope(Scope), - ok = rabbit_misc:recursive_delete([BackupDir]), - info("~s upgrades: Mnesia backup removed~n", [Scope]), - ok = file:delete(LockFile); - {error, E} -> - %% If we can't backup, the upgrade hasn't started - %% hence we don't need the lockfile since the real - %% mnesia dir is the good one. - ok = file:delete(LockFile), - throw({could_not_back_up_mnesia_dir, E}) - end; - {error, eexist} -> - throw({error, previous_upgrade_failed}) - end. + info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + Fun(), + [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], + info("~s upgrades: All upgrades applied successfully~n", [Scope]), + ok = rabbit_version:record_desired_for_scope(Scope). apply_upgrade(Scope, {M, F}) -> info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), @@ -276,6 +307,7 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). +backup_dir() -> dir() ++ "-upgrade-backup". 
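%% Illustrative only (the directory value is hypothetical): both path
%% styles used in these patches behave as shown below; filename:join/2
%% inserts a separator, plain ++ is literal concatenation.
paths_sketch(Dir) ->
    {filename:join(Dir, "nodes_running_at_shutdown"),  %% ".../nodes_running_at_shutdown"
     Dir ++ "-upgrade-backup"}.                        %% "...-upgrade-backup"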
%% NB: we cannot use rabbit_log here since it may not have been %% started yet -- cgit v1.2.1 From 9c46af9fe5bbd03d96a51a67d0dc576fa8573415 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 16:36:02 +0000 Subject: Rip out msg_store:release --- src/rabbit_msg_store.erl | 11 +---------- src/rabbit_tests.erl | 2 -- src/rabbit_variable_queue.erl | 7 +------ 3 files changed, 2 insertions(+), 18 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 2b162f9d..4ec77006 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -21,7 +21,7 @@ -export([start_link/4, successfully_recovered_state/1, client_init/4, client_terminate/1, client_delete_and_terminate/1, client_ref/1, close_all_indicated/1, - write/3, read/2, contains/2, remove/2, release/2, sync/3]). + write/3, read/2, contains/2, remove/2, sync/3]). -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal @@ -153,7 +153,6 @@ {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). -spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). -spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). --spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). -spec(sync/3 :: ([rabbit_types:msg_id()], fun (() -> any()), client_msstate()) -> 'ok'). @@ -457,8 +456,6 @@ contains(MsgId, CState) -> server_call(CState, {contains, MsgId}). remove([], _CState) -> ok; remove(MsgIds, CState = #client_msstate { client_ref = CRef }) -> server_cast(CState, {remove, CRef, MsgIds}). -release([], _CState) -> ok; -release(MsgIds, CState) -> server_cast(CState, {release, MsgIds}). sync(MsgIds, K, CState) -> server_cast(CState, {sync, MsgIds, K}). sync(Server) -> @@ -781,12 +778,6 @@ handle_cast({remove, CRef, MsgIds}, State) -> noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(MsgIds), removed, State1))); -handle_cast({release, MsgIds}, State = - #msstate { dedup_cache_ets = DedupCacheEts }) -> - lists:foreach( - fun (MsgId) -> decrement_cache(DedupCacheEts, MsgId) end, MsgIds), - noreply(State); - handle_cast({sync, MsgIds, K}, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ca046c91..ad8e2485 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1813,8 +1813,6 @@ test_msg_store() -> true = msg_store_contains(true, MsgIds2ndHalf, MSCState2), %% read the second half again MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2), - %% release the second half, just for fun (aka code coverage) - ok = rabbit_msg_store:release(MsgIds2ndHalf, MSCState3), %% read the second half again, just for fun (aka code coverage) MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3), ok = rabbit_msg_store:client_terminate(MSCState4), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1b29756b..8c9d62a7 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -725,7 +725,7 @@ requeue(AckTags, MsgPropsFun, State) -> needs_confirming = false } end, a(reduce_memory_use( - ack(fun msg_store_release/3, + ack(fun (_, _, _) -> ok end, fun (#msg_status { msg = Msg, msg_props = MsgProps }, State1) -> {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), true, false, State1), @@ -969,11 +969,6 @@ msg_store_remove(MSCState, IsPersistent, MsgIds) -> MSCState, IsPersistent, fun (MCSState1) -> rabbit_msg_store:remove(MsgIds, MCSState1) end). 
-msg_store_release(MSCState, IsPersistent, MsgIds) -> - with_immutable_msg_store_state( - MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:release(MsgIds, MCSState1) end). - msg_store_sync(MSCState, IsPersistent, MsgIds, Fun) -> with_immutable_msg_store_state( MSCState, IsPersistent, -- cgit v1.2.1 From b8f6018e0dc8ee6f5e67ee1d58fafa938185c42d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 16:45:53 +0000 Subject: Rip out dedup cache --- src/rabbit_msg_store.erl | 129 ++++++++++------------------------------------- 1 file changed, 28 insertions(+), 101 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4ec77006..bc68d2cd 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -67,7 +67,6 @@ gc_pid, %% pid of our GC file_handles_ets, %% tid of the shared file handles table file_summary_ets, %% tid of the file summary table - dedup_cache_ets, %% tid of dedup cache table cur_file_cache_ets, %% tid of current file cache table dying_clients, %% set of dying clients clients, %% map of references of all registered clients @@ -87,7 +86,6 @@ gc_pid, file_handles_ets, file_summary_ets, - dedup_cache_ets, cur_file_cache_ets }). @@ -130,7 +128,6 @@ gc_pid :: pid(), file_handles_ets :: ets:tid(), file_summary_ets :: ets:tid(), - dedup_cache_ets :: ets:tid(), cur_file_cache_ets :: ets:tid()}). -type(msg_ref_delta_gen(A) :: fun ((A) -> 'finished' | @@ -395,7 +392,7 @@ successfully_recovered_state(Server) -> client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) -> {IState, IModule, Dir, GCPid, - FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts} = + FileHandlesEts, FileSummaryEts, CurFileCacheEts} = gen_server2:call( Server, {new_client_state, Ref, MsgOnDiskFun, CloseFDsFun}, infinity), #client_msstate { server = Server, @@ -407,7 +404,6 @@ client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) -> gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }. client_terminate(CState = #client_msstate { client_ref = Ref }) -> @@ -428,27 +424,16 @@ write(MsgId, Msg, ok = server_cast(CState, {write, CRef, MsgId}). read(MsgId, - CState = #client_msstate { dedup_cache_ets = DedupCacheEts, - cur_file_cache_ets = CurFileCacheEts }) -> - %% 1. Check the dedup cache - case fetch_and_increment_cache(DedupCacheEts, MsgId) of - not_found -> - %% 2. Check the cur file cache - case ets:lookup(CurFileCacheEts, MsgId) of - [] -> - Defer = fun() -> - {server_call(CState, {read, MsgId}), CState} - end, - case index_lookup_positive_ref_count(MsgId, CState) of - not_found -> Defer(); - MsgLocation -> client_read1(MsgLocation, Defer, CState) - end; - [{MsgId, Msg, _CacheRefCount}] -> - %% Although we've found it, we don't know the - %% refcount, so can't insert into dedup cache - {{ok, Msg}, CState} + CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) -> + %% Check the cur file cache + case ets:lookup(CurFileCacheEts, MsgId) of + [] -> + Defer = fun() -> {server_call(CState, {read, MsgId}), CState} end, + case index_lookup_positive_ref_count(MsgId, CState) of + not_found -> Defer(); + MsgLocation -> client_read1(MsgLocation, Defer, CState) end; - Msg -> + [{MsgId, Msg, _CacheRefCount}] -> {{ok, Msg}, CState} end. 
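%% Quick, self-contained illustration (hypothetical table and keys) of
%% the ets:lookup/2 pattern the simplified read/2 above relies on: a
%% set-type table returns either [] or a single {Key, ...} tuple.
cur_file_cache_lookup_demo() ->
    T = ets:new(cur_file_cache_demo, [set, public]),
    true = ets:insert(T, {msg_a, <<"payload">>, 1}),
    [] = ets:lookup(T, msg_b),
    [{msg_a, <<"payload">>, 1}] = ets:lookup(T, msg_a),
    true = ets:delete(T),
    ok.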
@@ -514,7 +499,6 @@ client_read2(false, _Right, client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, gc_pid = GCPid, client_ref = Ref }) -> Release = @@ -571,8 +555,8 @@ client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, %% Could the msg_store now mark the file to be %% closed? No: marks for closing are issued only %% when the msg_store has locked the file. - {Msg, CState2} = %% This will never be the current file - read_from_disk(MsgLocation, CState1, DedupCacheEts), + %% This will never be the current file + {Msg, CState2} = read_from_disk(MsgLocation, CState1), Release(), %% this MUST NOT fail with badarg {{ok, Msg}, CState2}; #msg_location {} = MsgLocation -> %% different file! @@ -636,7 +620,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> %% CleanShutdown <=> msg location index and file_summary both %% recovered correctly. - DedupCacheEts = ets:new(rabbit_msg_store_dedup_cache, [set, public]), FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles, [ordered_set, public]), CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]), @@ -666,7 +649,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, dying_clients = sets:new(), clients = Clients, @@ -717,14 +699,12 @@ handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, index_module = IndexModule, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, clients = Clients, gc_pid = GCPid }) -> Clients1 = dict:store(CRef, {MsgOnDiskFun, CloseFDsFun}, Clients), - reply({IndexState, IndexModule, Dir, GCPid, - FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts}, - State #msstate { clients = Clients1 }); + reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts, + CurFileCacheEts}, State #msstate { clients = Clients1 }); handle_call({client_terminate, CRef}, _From, State) -> reply(ok, clear_client(CRef, State)); @@ -831,7 +811,6 @@ terminate(_Reason, State = #msstate { index_state = IndexState, gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, clients = Clients, dir = Dir }) -> @@ -846,8 +825,7 @@ terminate(_Reason, State = #msstate { index_state = IndexState, end, State3 = close_all_handles(State1), store_file_summary(FileSummaryEts, Dir), - [ets:delete(T) || - T <- [FileSummaryEts, DedupCacheEts, FileHandlesEts, CurFileCacheEts]], + [ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts, CurFileCacheEts]], IndexModule:terminate(IndexState), store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, {index_module, IndexModule}], Dir), @@ -966,26 +944,18 @@ write_message(MsgId, Msg, sum_valid_data = SumValid + TotalSize, sum_file_size = SumFileSize + TotalSize }). 
-read_message(MsgId, From, - State = #msstate { dedup_cache_ets = DedupCacheEts }) -> +read_message(MsgId, From, State) -> case index_lookup_positive_ref_count(MsgId, State) of - not_found -> - gen_server2:reply(From, not_found), - State; - MsgLocation -> - case fetch_and_increment_cache(DedupCacheEts, MsgId) of - not_found -> read_message1(From, MsgLocation, State); - Msg -> gen_server2:reply(From, {ok, Msg}), - State - end + not_found -> gen_server2:reply(From, not_found), + State; + MsgLocation -> read_message1(From, MsgLocation, State) end. -read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, - file = File, offset = Offset } = MsgLoc, +read_message1(From, #msg_location { msg_id = MsgId, file = File, + offset = Offset } = MsgLoc, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }) -> case File =:= CurFile of true -> {Msg, State1} = @@ -998,10 +968,8 @@ read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, true -> file_handle_cache:flush(CurHdl); false -> ok end, - read_from_disk(MsgLoc, State, DedupCacheEts); + read_from_disk(MsgLoc, State); [{MsgId, Msg1, _CacheRefCount}] -> - ok = maybe_insert_into_cache( - DedupCacheEts, RefCount, MsgId, Msg1), {Msg1, State} end, gen_server2:reply(From, {ok, Msg}), @@ -1011,17 +979,14 @@ read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, case Locked of true -> add_to_pending_gc_completion({read, MsgId, From}, File, State); - false -> {Msg, State1} = - read_from_disk(MsgLoc, State, DedupCacheEts), + false -> {Msg, State1} = read_from_disk(MsgLoc, State), gen_server2:reply(From, {ok, Msg}), State1 end end. -read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, - file = File, offset = Offset, - total_size = TotalSize }, - State, DedupCacheEts) -> +read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset, + total_size = TotalSize }, State) -> {Hdl, State1} = get_read_handle(File, State), {ok, Offset} = file_handle_cache:position(Hdl, Offset), {ok, {MsgId, Msg}} = @@ -1037,7 +1002,6 @@ read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, {proc_dict, get()} ]}} end, - ok = maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg), {Msg, State1}. contains_message(MsgId, From, @@ -1056,8 +1020,7 @@ contains_message(MsgId, From, end. remove_message(MsgId, CRef, - State = #msstate { file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts }) -> + State = #msstate { file_summary_ets = FileSummaryEts }) -> case should_mask_action(CRef, MsgId, State) of {true, _Location} -> State; @@ -1078,8 +1041,7 @@ remove_message(MsgId, CRef, %% don't remove from CUR_FILE_CACHE_ETS_NAME here %% because there may be further writes in the mailbox %% for the same msg. - 1 -> ok = remove_cache_entry(DedupCacheEts, MsgId), - case ets:lookup(FileSummaryEts, File) of + 1 -> case ets:lookup(FileSummaryEts, File) of [#file_summary { locked = true }] -> add_to_pending_gc_completion( {remove, MsgId, CRef}, File, State); @@ -1089,8 +1051,7 @@ remove_message(MsgId, CRef, File, adjust_valid_total_size(File, -TotalSize, State)) end; - _ -> ok = decrement_cache(DedupCacheEts, MsgId), - ok = Dec(), + _ -> ok = Dec(), State end end. 
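%% For context, a minimal illustration (hypothetical table) of the
%% ets:update_counter/3 bookkeeping the dedup cache used on its
%% {MsgId, Msg, RefCount} entries, which the following hunks remove:
dedup_counter_demo() ->
    T = ets:new(dedup_cache_demo, [set, public]),
    true = ets:insert(T, {some_id, <<"msg">>, 1}),
    2 = ets:update_counter(T, some_id, {3, 1}),   %% bump 3rd field, return new value
    1 = ets:update_counter(T, some_id, {3, -1}),
    true = ets:delete(T),
    ok.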
@@ -1313,12 +1274,6 @@ list_sorted_file_names(Dir, Ext) -> %% message cache helper functions %%---------------------------------------------------------------------------- -maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg) - when RefCount > 1 -> - update_msg_cache(DedupCacheEts, MsgId, Msg); -maybe_insert_into_cache(_DedupCacheEts, _RefCount, _MsgId, _Msg) -> - ok. - update_msg_cache(CacheEts, MsgId, Msg) -> case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of true -> ok; @@ -1327,34 +1282,6 @@ update_msg_cache(CacheEts, MsgId, Msg) -> fun () -> update_msg_cache(CacheEts, MsgId, Msg) end) end. -remove_cache_entry(DedupCacheEts, MsgId) -> - true = ets:delete(DedupCacheEts, MsgId), - ok. - -fetch_and_increment_cache(DedupCacheEts, MsgId) -> - case ets:lookup(DedupCacheEts, MsgId) of - [] -> - not_found; - [{_MsgId, Msg, _RefCount}] -> - safe_ets_update_counter_ok( - DedupCacheEts, MsgId, {3, +1}, - %% someone has deleted us in the meantime, insert us - fun () -> ok = update_msg_cache(DedupCacheEts, MsgId, Msg) end), - Msg - end. - -decrement_cache(DedupCacheEts, MsgId) -> - true = safe_ets_update_counter( - DedupCacheEts, MsgId, {3, -1}, - fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, MsgId); - (_N) -> true - end, - %% MsgId is not in there because although it's been - %% delivered, it's never actually been read (think: - %% persistent message held in RAM) - fun () -> true end), - ok. - %%---------------------------------------------------------------------------- %% index %%---------------------------------------------------------------------------- -- cgit v1.2.1 From 5f295dc115d1d93428377051530f79ca26064c20 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 17:57:04 +0000 Subject: Well I thought =~= was beautiful and appropriately approximate to == --- src/rabbit_mnesia.erl | 2 +- src/rabbit_upgrade.erl | 2 +- src/rabbit_version.erl | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4d3267a2..869f09a1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -485,7 +485,7 @@ schema_ok_or_move() -> ensure_version_ok({ok, DiscVersion}) -> DesiredVersion = rabbit_version:desired(), - case rabbit_version:'=~='(DesiredVersion, DiscVersion) of + case rabbit_version:matches(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {version_mismatch, DesiredVersion, DiscVersion}}) end; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d56b50b2..866f20ee 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -209,7 +209,7 @@ upgrade_mode(AllNodes) -> [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version); {badrpc, Reason} -> ErrFun({unknown, Reason}); - CV -> case rabbit_version:'=~='( + CV -> case rabbit_version:matches( MyVersion, CV) of true -> secondary; false -> ErrFun(CV) diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index e079df4a..400abc10 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,7 +16,7 @@ -module(rabbit_version). --export([recorded/0, '=~='/2, desired/0, desired_for_scope/1, +-export([recorded/0, matches/2, desired/0, desired_for_scope/1, record_desired/0, record_desired_for_scope/1, upgrades_required/1]). @@ -32,7 +32,7 @@ -type(version() :: [atom()]). -spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec('=~='/2 :: ([A], [A]) -> boolean()). +-spec(matches/2 :: ([A], [A]) -> boolean()). -spec(desired/0 :: () -> version()). 
-spec(desired_for_scope/1 :: (scope()) -> scope_version()). -spec(record_desired/0 :: () -> 'ok'). @@ -79,7 +79,7 @@ record_for_scope(Scope, ScopeVersion) -> %% ------------------------------------------------------------------- -'=~='(VerA, VerB) -> +matches(VerA, VerB) -> lists:usort(VerA) =:= lists:usort(VerB). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 50f18ad821cf3e68d9fa9c67eaa2f72106b4aa84 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:00:31 +0000 Subject: Various QA tidyups, and stop exporting the backup / remove backup functions. --- src/rabbit_upgrade.erl | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d56b50b2..09530f38 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,8 +16,7 @@ -module(rabbit_upgrade). --export([maybe_backup/0, maybe_upgrade_mnesia/0, maybe_upgrade_local/0, - maybe_remove_backup/0]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). -include("rabbit.hrl"). @@ -28,10 +27,8 @@ -ifdef(use_specs). --spec(maybe_backup/0 :: () -> 'ok'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(maybe_remove_backup/0 :: () -> 'ok'). -endif. @@ -94,13 +91,13 @@ %% ------------------------------------------------------------------- -maybe_backup() -> +maybe_take_backup() -> case backup_required() of - true -> backup(); + true -> take_backup(); _ -> ok end. -backup() -> +take_backup() -> rabbit:prepare(), %% Ensure we have logs for this LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of @@ -128,17 +125,15 @@ backup() -> maybe_remove_backup() -> - case file:read_file_info(backup_dir()) of + case filelib:is_dir(backup_dir()) of {ok, _} -> remove_backup(); _ -> ok end. remove_backup() -> - LockFile = lock_filename(dir()), - BackupDir = backup_dir(), - ok = rabbit_misc:recursive_delete([BackupDir]), + ok = rabbit_misc:recursive_delete([backup_dir()]), info("upgrades: Mnesia backup removed~n", []), - ok = file:delete(LockFile). + ok = file:delete(lock_filename(dir())). backup_required() -> case {rabbit_version:upgrades_required(mnesia), @@ -150,7 +145,7 @@ backup_required() -> end. maybe_upgrade_mnesia() -> - maybe_backup(), + maybe_take_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> @@ -278,15 +273,16 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - case rabbit_version:upgrades_required(local) of - {error, version_not_available} -> version_not_available; - {error, _} = Err -> throw(Err); - {ok, []} -> ok; - {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end) - end, - maybe_remove_backup(). + Res = case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> ok; + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, + fun () -> ok end) + end, + maybe_remove_backup(), + Res. 
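%% A hedged note on the guard in maybe_remove_backup/0 above, which the
%% following patches revisit: file:read_file_info/1 returns
%% {ok, FileInfo} | {error, Reason}, whereas filelib:is_dir/1 returns a
%% plain boolean(), so the two cannot share the same case clauses.
backup_dir_check_demo(Dir) ->
    {filelib:is_dir(Dir),          %% => true | false
     file:read_file_info(Dir)}.    %% => {ok, FileInfo} | {error, Reason}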
%% ------------------------------------------------------------------- -- cgit v1.2.1 From b38be006e69e96cdd2e81929b874cd43bad0b9f0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:22:51 +0000 Subject: maybe_remove_backup is safe when returning version_not_available since we would not have taken a backup in the first place. However, this is not exactly obvious, so let's not do that. --- src/rabbit_upgrade.erl | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b9c7b8dc..73c9ee2b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -273,16 +273,15 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - Res = case rabbit_version:upgrades_required(local) of - {error, version_not_available} -> version_not_available; - {error, _} = Err -> throw(Err); - {ok, []} -> ok; - {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end) - end, - maybe_remove_backup(), - Res. + case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> maybe_remove_backup(); + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, + fun () -> ok end), + maybe_remove_backup() + end. %% ------------------------------------------------------------------- -- cgit v1.2.1 From 7f13bc65ab2ea9a1c712990781a80f225c2188e9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:28:34 +0000 Subject: Oops. --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 73c9ee2b..e84e1f7b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -126,8 +126,8 @@ take_backup() -> maybe_remove_backup() -> case filelib:is_dir(backup_dir()) of - {ok, _} -> remove_backup(); - _ -> ok + true -> ok = remove_backup(); + _ -> ok end. remove_backup() -> -- cgit v1.2.1 From 18c265d38bd490cd421ce05b29551e65b5b82747 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:42:36 +0000 Subject: Don't try to determine whether a backup is needed before doing anything, take it as needed. This inverts the backup and the lock file - the backup now comes first and the lock file is only used to defend apply_upgrades/3. --- src/rabbit_upgrade.erl | 58 ++++++++++++++++---------------------------------- 1 file changed, 18 insertions(+), 40 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index e84e1f7b..0a7e4a37 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -91,39 +91,24 @@ %% ------------------------------------------------------------------- -maybe_take_backup() -> - case backup_required() of - true -> take_backup(); - _ -> ok +ensure_backup() -> + case filelib:is_file(lock_filename()) of + false -> case filelib:is_dir(backup_dir()) of + false -> ok = take_backup(); + _ -> ok + end; + true -> throw({error, previous_upgrade_failed}) end. take_backup() -> rabbit:prepare(), %% Ensure we have logs for this - LockFile = lock_filename(dir()), - case rabbit_misc:lock_file(LockFile) of - ok -> - BackupDir = backup_dir(), - case rabbit_mnesia:copy_db(BackupDir) of - ok -> - %% We need to make the backup after creating the - %% lock file so that it protects us from trying to - %% overwrite the backup. 
Unfortunately this means - %% the lock file exists in the backup too, which - %% is not intuitive. Remove it. - ok = file:delete(lock_filename(BackupDir)), - info("upgrades: Mnesia dir backed up to ~p~n", [BackupDir]); - {error, E} -> - %% If we can't backup, the upgrade hasn't started - %% hence we don't need the lockfile since the real - %% mnesia dir is the good one. - ok = file:delete(LockFile), - throw({could_not_back_up_mnesia_dir, E}) - end; - {error, eexist} -> - throw({error, previous_upgrade_failed}) + BackupDir = backup_dir(), + case rabbit_mnesia:copy_db(BackupDir) of + ok -> info("upgrades: Mnesia dir backed up to ~p~n", + [BackupDir]); + {error, E} -> throw({could_not_back_up_mnesia_dir, E}) end. - maybe_remove_backup() -> case filelib:is_dir(backup_dir()) of true -> ok = remove_backup(); @@ -132,20 +117,9 @@ maybe_remove_backup() -> remove_backup() -> ok = rabbit_misc:recursive_delete([backup_dir()]), - info("upgrades: Mnesia backup removed~n", []), - ok = file:delete(lock_filename(dir())). - -backup_required() -> - case {rabbit_version:upgrades_required(mnesia), - rabbit_version:upgrades_required(local)} of - {{ok, []}, {ok, []}} -> false; - {_, {ok, _}} -> true; - {{ok, _}, _} -> true; - _ -> false - end. + info("upgrades: Mnesia backup removed~n", []). maybe_upgrade_mnesia() -> - maybe_take_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> @@ -286,12 +260,15 @@ maybe_upgrade_local() -> %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> + ensure_backup(), + ok = rabbit_misc:lock_file(lock_filename()), info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), Fun(), [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = rabbit_version:record_desired_for_scope(Scope). + ok = rabbit_version:record_desired_for_scope(Scope), + ok = file:delete(lock_filename()). apply_upgrade(Scope, {M, F}) -> info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), @@ -301,6 +278,7 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). +lock_filename() -> lock_filename(dir()). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). backup_dir() -> dir() ++ "-upgrade-backup". -- cgit v1.2.1 From e90061b37554d9acc9601ccdc64fb80cf5141901 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:51:34 +0000 Subject: When upgrading a secondary node we call init_db twice: once early to force a cluster rejoin (at which point we are not ready to do local upgrades, e.g. fhc is not running) and then once at the regular time. Deal with that. --- src/rabbit_mnesia.erl | 29 +++++++++++++++++------------ src/rabbit_upgrade.erl | 2 +- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 869f09a1..c1f8a22f 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -18,7 +18,7 @@ -module(rabbit_mnesia). -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, - cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/2, + cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/3, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, @@ -45,7 +45,7 @@ -spec(dir/0 :: () -> file:filename()). 
-spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). --spec(init_db/2 :: ([node()], boolean()) -> 'ok'). +-spec(init_db/3 :: ([node()], boolean(), boolean()) -> 'ok'). -spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). @@ -90,7 +90,7 @@ status() -> init() -> ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), - ok = init_db(read_cluster_nodes_config(), true), + ok = init_db(read_cluster_nodes_config(), true, true), ok. is_db_empty() -> @@ -112,7 +112,7 @@ cluster(ClusterNodes, Force) -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force), + ok = init_db(ClusterNodes, Force, true), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() @@ -413,7 +413,7 @@ delete_previously_running_disc_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. -init_db(ClusterNodes, Force) -> +init_db(ClusterNodes, Force, DoLocalUpgrades) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -451,13 +451,18 @@ init_db(ClusterNodes, Force) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> - ok = rabbit_version:record_desired() + case DoLocalUpgrades of + true -> + case rabbit_upgrade:maybe_upgrade_local() of + ok -> + ok; + %% If we're just starting up a new + %% node we won't have a version + version_not_available -> + ok = rabbit_version:record_desired() + end; + false -> + ok end, ensure_schema_integrity() end; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0a7e4a37..6959208b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -231,7 +231,7 @@ secondary_upgrade(AllNodes) -> false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = rabbit_mnesia:init_db(ClusterNodes, true), + ok = rabbit_mnesia:init_db(ClusterNodes, true, false), ok = rabbit_version:record_desired_for_scope(mnesia), ok. -- cgit v1.2.1 From 49025c80d9eb23f59615f6a92522d48aee5bbd3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:54:09 +0000 Subject: Better name, vertical space. --- src/rabbit_mnesia.erl | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c1f8a22f..47df1148 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -413,7 +413,7 @@ delete_previously_running_disc_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. 
-init_db(ClusterNodes, Force, DoLocalUpgrades) -> +init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -451,18 +451,16 @@ init_db(ClusterNodes, Force, DoLocalUpgrades) -> true -> disc; false -> ram end), - case DoLocalUpgrades of - true -> - case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new - %% node we won't have a version - version_not_available -> - ok = rabbit_version:record_desired() - end; - false -> - ok + case DoSecondaryLocalUpgrades of + true -> case rabbit_upgrade:maybe_upgrade_local() of + ok -> + ok; + %% If we're just starting up a new + %% node we won't have a version + version_not_available -> + ok = rabbit_version:record_desired() + end; + false -> ok end, ensure_schema_integrity() end; -- cgit v1.2.1 From f1d46d7b616b8cb325ff4f6e7f02569fc0e9f5f7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Mar 2011 17:56:34 +0000 Subject: Add test --- src/gm_speed_test.erl | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 src/gm_speed_test.erl diff --git a/src/gm_speed_test.erl b/src/gm_speed_test.erl new file mode 100644 index 00000000..defb0f29 --- /dev/null +++ b/src/gm_speed_test.erl @@ -0,0 +1,82 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_speed_test). + +-export([test/3]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). +-export([wile_e_coyote/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +%% callbacks + +joined(Owner, _Members) -> + Owner ! joined, + ok. + +members_changed(_Owner, _Births, _Deaths) -> + ok. + +handle_msg(Owner, _From, ping) -> + Owner ! ping, + ok. + +terminate(Owner, _Reason) -> + Owner ! terminated, + ok. + +%% other + +wile_e_coyote(Time, WriteUnit) -> + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self()), + receive joined -> ok end, + timer:sleep(1000), %% wait for all to join + timer:send_after(Time, stop), + Start = now(), + {Sent, Received} = loop(Pid, WriteUnit, 0, 0), + End = now(), + ok = gm:leave(Pid), + receive terminated -> ok end, + Elapsed = timer:now_diff(End, Start) / 1000000, + io:format("Sending rate: ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n", + [Sent/Elapsed, Received/Elapsed]), + ok. + +loop(Pid, WriteUnit, Sent, Received) -> + case read(Received) of + {stop, Received1} -> {Sent, Received1}; + {ok, Received1} -> ok = write(Pid, WriteUnit), + loop(Pid, WriteUnit, Sent + WriteUnit, Received1) + end. + +read(Count) -> + receive + ping -> read(Count + 1); + stop -> {stop, Count} + after 5 -> + {ok, Count} + end. + +write(_Pid, 0) -> ok; +write(Pid, N) -> ok = gm:broadcast(Pid, ping), + write(Pid, N - 1). 
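%% A small sanity check (hypothetical numbers) of the rate arithmetic in
%% wile_e_coyote/2 above: timer:now_diff/2 yields microseconds, hence the
%% division by 1000000 before computing msgs/sec.
rate_demo() ->
    Start   = {1300, 0, 0},                          %% {MegaSecs, Secs, MicroSecs}
    End     = {1300, 10, 0},                         %% 10 seconds later
    Elapsed = timer:now_diff(End, Start) / 1000000,  %% 10.0 seconds
    25000 / Elapsed.                                 %% 2500.0 msgs/sec for 25000 msgs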
+ +test(Time, WriteUnit, Nodes) -> + ok = gm:create_tables(), + [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes]. -- cgit v1.2.1 From 0cca73f99636dd92c176a8caa54014651f58e25f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Mar 2011 17:57:54 +0000 Subject: Introduce batching (again - same diff as 5f7d8d07f94f) --- src/gm.erl | 134 ++++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 92 insertions(+), 42 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 8cf22581..5b3623cf 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -376,15 +376,16 @@ confirmed_broadcast/2, group_members/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_info/2]). + code_change/3, prioritise_cast/2, prioritise_info/2]). -export([behaviour_info/1]). --export([table_definitions/0]). +-export([table_definitions/0, flush/1]). -define(GROUP_TABLE, gm_group). -define(HIBERNATE_AFTER_MIN, 1000). -define(DESIRED_HIBERNATE, 10000). +-define(BROADCAST_TIMER, 25). -define(SETS, ordsets). -define(DICT, orddict). @@ -398,7 +399,9 @@ pub_count, members_state, callback_args, - confirms + confirms, + broadcast_buffer, + broadcast_timer }). -record(gm_group, { name, version, members }). @@ -508,21 +511,26 @@ confirmed_broadcast(Server, Msg) -> group_members(Server) -> gen_server2:call(Server, group_members, infinity). +flush(Server) -> + gen_server2:cast(Server, flush). + init([GroupName, Module, Args]) -> random:seed(now()), gen_server2:cast(self(), join), Self = self(), - {ok, #state { self = Self, - left = {Self, undefined}, - right = {Self, undefined}, - group_name = GroupName, - module = Module, - view = undefined, - pub_count = 0, - members_state = undefined, - callback_args = Args, - confirms = queue:new() }, hibernate, + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new(), + broadcast_buffer = [], + broadcast_timer = undefined }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -620,7 +628,11 @@ handle_cast(join, State = #state { self = Self, {Module:joined(Args, all_known_members(View)), State1}); handle_cast(leave, State) -> - {stop, normal, State}. + {stop, normal, State}; + +handle_cast(flush, State) -> + noreply( + flush_broadcast_buffer(State #state { broadcast_timer = undefined })). handle_info({'DOWN', MRef, process, _Pid, _Reason}, @@ -662,14 +674,17 @@ handle_info({'DOWN', MRef, process, _Pid, _Reason}, end. -terminate(Reason, #state { module = Module, - callback_args = Args }) -> +terminate(Reason, State = #state { module = Module, + callback_args = Args }) -> + flush_broadcast_buffer(State), Module:terminate(Args, Reason). code_change(_OldVsn, State, _Extra) -> {ok, State}. +prioritise_cast(flush, _State) -> 1; +prioritise_cast(_ , _State) -> 0. prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; prioritise_info(_ , _State) -> 0. @@ -782,33 +797,62 @@ handle_msg({activity, _NotLeft, _Activity}, State) -> noreply(State) -> - {noreply, State, hibernate}. + {noreply, ensure_broadcast_timer(State), hibernate}. reply(Reply, State) -> - {reply, Reply, State, hibernate}. 
- -internal_broadcast(Msg, From, State = #state { self = Self, - pub_count = PubCount, - members_state = MembersState, - module = Module, - confirms = Confirms, - callback_args = Args }) -> - PubMsg = {PubCount, Msg}, - Activity = activity_cons(Self, [PubMsg], [], activity_nil()), - ok = maybe_send_activity(activity_finalise(Activity), State), - MembersState1 = - with_member( - fun (Member = #member { pending_ack = PA }) -> - Member #member { pending_ack = queue:in(PubMsg, PA) } - end, Self, MembersState), + {reply, Reply, ensure_broadcast_timer(State), hibernate}. + +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = undefined }) -> + State; +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = TRef }) -> + timer:cancel(TRef), + State #state { broadcast_timer = undefined }; +ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> + {ok, TRef} = timer:apply_after(?BROADCAST_TIMER, ?MODULE, flush, [self()]), + State #state { broadcast_timer = TRef }; +ensure_broadcast_timer(State) -> + State. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + module = Module, + confirms = Confirms, + callback_args = Args, + broadcast_buffer = Buffer }) -> + Result = Module:handle_msg(Args, Self, Msg), + Buffer1 = [{PubCount, Msg} | Buffer], Confirms1 = case From of none -> Confirms; _ -> queue:in({PubCount, From}, Confirms) end, - handle_callback_result({Module:handle_msg(Args, Self, Msg), - State #state { pub_count = PubCount + 1, - members_state = MembersState1, - confirms = Confirms1 }}). + State1 = State #state { pub_count = PubCount + 1, + confirms = Confirms1, + broadcast_buffer = Buffer1 }, + case From =/= none of + true -> + handle_callback_result({Result, flush_broadcast_buffer(State1)}); + false -> + handle_callback_result( + {Result, State1 #state { broadcast_buffer = Buffer1 }}) + end. + +flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) -> + State; +flush_broadcast_buffer(State = #state { self = Self, + members_state = MembersState, + broadcast_buffer = Buffer }) -> + Pubs = lists:reverse(Buffer), + Activity = activity_cons(Self, Pubs, [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = with_member( + fun (Member = #member { pending_ack = PA }) -> + PA1 = queue:join(PA, queue:from_list(Pubs)), + Member #member { pending_ack = PA1 } + end, Self, MembersState), + State #state { members_state = MembersState1, + broadcast_buffer = [] }. %% --------------------------------------------------------------------------- @@ -1093,16 +1137,22 @@ maybe_monitor(Self, Self) -> maybe_monitor(Other, _Self) -> erlang:monitor(process, Other). -check_neighbours(State = #state { self = Self, - left = Left, - right = Right, - view = View }) -> +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View, + broadcast_buffer = Buffer }) -> #view_member { left = VLeft, right = VRight } = fetch_view_member(Self, View), Ver = view_version(View), Left1 = ensure_neighbour(Ver, Self, Left, VLeft), Right1 = ensure_neighbour(Ver, Self, Right, VRight), - State1 = State #state { left = Left1, right = Right1 }, + Buffer1 = case Right1 of + {Self, undefined} -> []; + _ -> Buffer + end, + State1 = State #state { left = Left1, right = Right1, + broadcast_buffer = Buffer1 }, ok = maybe_send_catchup(Right, State1), State1. 
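Seen from a caller, the effect of the new broadcast_buffer is that plain broadcasts issued in quick succession are coalesced into a single activity, sent on to the right-hand neighbour either when the 25ms timer fires or when a flush is forced; a confirmed_broadcast flushes immediately, since its sender is blocked waiting on the call. A minimal sketch of that behaviour (burst/2 is illustrative and not part of the patch):

    burst(GmPid, N) ->
        %% fire-and-forget broadcasts accumulate in broadcast_buffer rather
        %% than being sent downstream one activity per message
        [ok = gm:broadcast(GmPid, {payload, I}) || I <- lists:seq(1, N)],
        %% push the buffered publishes out now instead of waiting up to
        %% ?BROADCAST_TIMER (25ms) for the timer-driven flush
        ok = gm:flush(GmPid).
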
-- cgit v1.2.1 From 2aeb64f3ce2bf0f0dec90e23b61578ead79781df Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 12:39:04 +0000 Subject: clarify documentation (thanks Emile) --- src/rabbit_variable_queue.erl | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1b29756b..14c36b12 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -150,10 +150,13 @@ %% responsive. %% %% In the queue we keep track of both messages that are pending -%% delivery and messages that are pending acks. This ensures that -%% purging (deleting the former) and deletion (deleting the former and -%% the latter) are both cheap and do require any scanning through qi -%% segments. +%% delivery and messages that are pending acks. In the event of a +%% queue purge, we only need to load qi segments if the queue has +%% elements in deltas (i.e. it came under significant memory +%% pressure). In the event of a queue deletion, in addition to the +%% preceding, by keeping track of pending acks in RAM, we do not need +%% to search through qi segments looking for messages that are yet to +%% be acknowledged. %% %% Pending acks are recorded in memory either as the tuple {SeqId, %% MsgId, MsgProps} (tuple-form) or as the message itself (message- -- cgit v1.2.1 From 22a202104ae661bdda9ed87977d9f03e1df6f240 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Mar 2011 12:39:48 +0000 Subject: Switch to erlang-nox. --- packaging/debs/Debian/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control index b01d38b3..45f5c5c4 100644 --- a/packaging/debs/Debian/debian/control +++ b/packaging/debs/Debian/debian/control @@ -7,7 +7,7 @@ Standards-Version: 3.8.0 Package: rabbitmq-server Architecture: all -Depends: erlang (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} +Depends: erlang-nox (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} Description: An AMQP server written in Erlang RabbitMQ is an implementation of AMQP, the emerging standard for high performance enterprise messaging. The RabbitMQ server is a robust and -- cgit v1.2.1 From eeb1e5597036ba4464221aa934be00310df5668c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 15:02:33 +0000 Subject: Enforce a bunch of returns --- src/rabbit_msg_store.erl | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 2b162f9d..bb26de64 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -850,16 +850,16 @@ terminate(_Reason, State = #msstate { index_state = IndexState, State1 = case CurHdl of undefined -> State; _ -> State2 = internal_sync(State), - file_handle_cache:close(CurHdl), + ok = file_handle_cache:close(CurHdl), State2 end, State3 = close_all_handles(State1), - store_file_summary(FileSummaryEts, Dir), - [ets:delete(T) || + ok = store_file_summary(FileSummaryEts, Dir), + [true = ets:delete(T) || T <- [FileSummaryEts, DedupCacheEts, FileHandlesEts, CurFileCacheEts]], IndexModule:terminate(IndexState), - store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, - {index_module, IndexModule}], Dir), + ok = store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, + {index_module, IndexModule}], Dir), State3 #msstate { index_state = undefined, current_file_handle = undefined }. 
@@ -912,13 +912,16 @@ internal_sync(State = #msstate { current_file_handle = CurHdl, false -> [{CRef, MsgIds} | NS] end end, [], CTM), - case {Syncs, CGs} of - {[], []} -> ok; - _ -> file_handle_cache:sync(CurHdl) - end, + ok = case {Syncs, CGs} of + {[], []} -> ok; + _ -> file_handle_cache:sync(CurHdl) + end, [K() || K <- lists:reverse(Syncs)], - [client_confirm(CRef, MsgIds, written, State1) || {CRef, MsgIds} <- CGs], - State1 #msstate { cref_to_msg_ids = dict:new(), on_sync = [] }. + State2 = lists:foldl( + fun ({CRef, MsgIds}, StateN) -> + client_confirm(CRef, MsgIds, written, StateN) + end, State1, CGs), + State2 #msstate { on_sync = [] }. write_action({true, not_found}, _MsgId, State) -> {ignore, undefined, State}; @@ -1466,7 +1469,7 @@ recover_file_summary(false, _Dir) -> recover_file_summary(true, Dir) -> Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME), case ets:file2tab(Path) of - {ok, Tid} -> file:delete(Path), + {ok, Tid} -> ok = file:delete(Path), {true, Tid}; {error, _Error} -> recover_file_summary(false, Dir) end. @@ -1533,9 +1536,7 @@ scan_file_for_valid_messages(Dir, FileName) -> Hdl, filelib:file_size( form_filename(Dir, FileName)), fun scan_fun/2, []), - %% if something really bad has happened, - %% the close could fail, but ignore - file_handle_cache:close(Hdl), + ok = file_handle_cache:close(Hdl), Valid; {error, enoent} -> {ok, [], 0}; {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} @@ -1971,32 +1972,33 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), - file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + ok = file:delete(filename:join(Dir, ?CLEAN_FILENAME)), recover_crashed_compactions(BaseDir), ok. foreach_file(D, Fun, Files) -> - [Fun(filename:join(D, File)) || File <- Files]. + [ok = Fun(filename:join(D, File)) || File <- Files]. foreach_file(D1, D2, Fun, Files) -> - [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. + [ok = Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, + CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end, case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> FileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), foreach_file(Dir, TmpDir, TransformFile, FileList), foreach_file(Dir, fun file:delete/1, FileList), - foreach_file(TmpDir, Dir, fun file:copy/2, FileList), + foreach_file(TmpDir, Dir, CopyFile, FileList), foreach_file(TmpDir, fun file:delete/1, FileList), ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> - rabbit_misc:ensure_parent_dirs_exist(FileNew), + ok = rabbit_misc:ensure_parent_dirs_exist(FileNew), {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, @@ -2009,6 +2011,6 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), - file_handle_cache:close(RefOld), - file_handle_cache:close(RefNew), + ok = file_handle_cache:close(RefOld), + ok = file_handle_cache:close(RefNew), ok. 
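The convention being enforced in this patch is to pattern-match on the expected result so that an unexpected error value surfaces as an immediate badmatch at the call site instead of being silently dropped. The same idiom in isolation (function names here are illustrative):

    delete_or_crash(File) ->
        %% a failure such as {error, enoent} now crashes loudly here
        ok = file:delete(File).

    copy_or_crash(Src, Dst) ->
        %% file:copy/2 returns {ok, BytesCopied}, so wrap it to yield 'ok',
        %% as the CopyFile fun above does
        {ok, _BytesCopied} = file:copy(Src, Dst),
        ok.
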
-- cgit v1.2.1 From 5fd7264796fbe35dbc7562b1cfc7ef09c4a3f3fb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 17:44:39 +0000 Subject: Renaming bits and pieces for consistency and checking a few more return values, plus other minor fixes --- src/rabbit_mnesia.erl | 1 + src/rabbit_upgrade.erl | 39 ++++++++++++++++++++------------------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47df1148..75e6eeed 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -528,6 +528,7 @@ move_db() -> ok. copy_db(Destination) -> + ok = ensure_mnesia_not_running(), rabbit_misc:recursive_copy(dir(), Destination). create_tables() -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 6959208b..39a42ef2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -91,7 +91,7 @@ %% ------------------------------------------------------------------- -ensure_backup() -> +ensure_backup_taken() -> case filelib:is_file(lock_filename()) of false -> case filelib:is_dir(backup_dir()) of false -> ok = take_backup(); @@ -109,7 +109,7 @@ take_backup() -> {error, E} -> throw({could_not_back_up_mnesia_dir, E}) end. -maybe_remove_backup() -> +ensure_backup_removed() -> case filelib:is_dir(backup_dir()) of true -> ok = remove_backup(); _ -> ok @@ -135,6 +135,7 @@ maybe_upgrade_mnesia() -> ok; {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this + ok = ensure_backup_taken(), case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) @@ -203,18 +204,18 @@ die(Msg, Args) -> primary_upgrade(Upgrades, Nodes) -> Others = Nodes -- [node()], - apply_upgrades( - mnesia, - Upgrades, - fun () -> - force_tables(), - case Others of - [] -> ok; - _ -> info("mnesia upgrades: Breaking cluster~n", []), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] - end - end), + ok = apply_upgrades( + mnesia, + Upgrades, + fun () -> + force_tables(), + case Others of + [] -> ok; + _ -> info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end + end), ok. force_tables() -> @@ -250,17 +251,17 @@ maybe_upgrade_local() -> case rabbit_version:upgrades_required(local) of {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); - {ok, []} -> maybe_remove_backup(); + {ok, []} -> ok = ensure_backup_removed(); {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end), - maybe_remove_backup() + ok = ensure_backup_taken(), + ok = apply_upgrades(local, Upgrades, + fun () -> ok end), + ok = ensure_backup_removed() end. 
%% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> - ensure_backup(), ok = rabbit_misc:lock_file(lock_filename()), info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), -- cgit v1.2.1 From 020f72fbafe0a7d62ced75093a01e2d5239ae7ab Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Mar 2011 17:52:19 +0000 Subject: cosmetic(ish): no need to match the return of ensure_* --- src/rabbit_mnesia.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 66436920..963d814e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -78,8 +78,8 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> - ok = ensure_mnesia_running(), - ok = ensure_mnesia_dir(), + ensure_mnesia_running(), + ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), ok. @@ -98,8 +98,8 @@ force_cluster(ClusterNodes) -> %% node. If Force is false, only connections to online nodes are %% allowed. cluster(ClusterNodes, Force) -> - ok = ensure_mnesia_not_running(), - ok = ensure_mnesia_dir(), + ensure_mnesia_not_running(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try ok = init_db(ClusterNodes, Force), @@ -455,7 +455,7 @@ create_schema() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = create_tables(), - ok = ensure_schema_integrity(), + ensure_schema_integrity(), ok = rabbit_upgrade:write_version(). move_db() -> @@ -476,7 +476,7 @@ move_db() -> {error, Reason} -> throw({error, {cannot_backup_mnesia, MnesiaDir, BackupDir, Reason}}) end, - ok = ensure_mnesia_dir(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok. @@ -561,12 +561,12 @@ wait_for_tables(TableNames) -> end. 
reset(Force) -> - ok = ensure_mnesia_not_running(), + ensure_mnesia_not_running(), Node = node(), case Force of true -> ok; false -> - ok = ensure_mnesia_dir(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = try -- cgit v1.2.1 From 60f50338ac0d19486a77ada8e3f7987a47449f25 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 23 Mar 2011 10:10:31 +0000 Subject: 2.4.0 changelog entries for debian and fedora --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index ae9b2059..45af770a 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -120,6 +120,9 @@ done rm -rf %{buildroot} %changelog +* Tue Mar 22 2011 Alexandru Scvortov 2.4.0-1 +- New Upstream Release + * Thu Feb 3 2011 simon@rabbitmq.com 2.3.1-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index 12165dc0..2ca5074f 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (2.4.0-1) lucid; urgency=low + + * New Upstream Release + + -- Alexandru Scvortov Tue, 22 Mar 2011 17:34:31 +0000 + rabbitmq-server (2.3.1-1) lucid; urgency=low * New Upstream Release -- cgit v1.2.1 -- cgit v1.2.1 From 8b16025be7faf2a5a4d4e403d2150a97e03994be Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 11:53:59 +0000 Subject: New decree is that you're not meant to match against ensure_stuff calls --- src/rabbit_mnesia.erl | 8 +++++--- src/rabbit_upgrade.erl | 10 ++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index ff1b8c97..6ba9e60a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -437,8 +437,9 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> %% We're the first node up case rabbit_upgrade:maybe_upgrade_local() of ok -> ensure_schema_integrity(); - version_not_available -> schema_ok_or_move() - end; + version_not_available -> ok = schema_ok_or_move() + end, + ok; {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( @@ -462,7 +463,8 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> end; false -> ok end, - ensure_schema_integrity() + ensure_schema_integrity(), + ok end; {error, Reason} -> %% one reason we may end up here is if we try to join diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 39a42ef2..87a22363 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -135,7 +135,7 @@ maybe_upgrade_mnesia() -> ok; {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this - ok = ensure_backup_taken(), + ensure_backup_taken(), case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) @@ -251,12 +251,14 @@ maybe_upgrade_local() -> case rabbit_version:upgrades_required(local) of {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); - {ok, []} -> ok = ensure_backup_removed(); + {ok, []} -> ensure_backup_removed(), + ok; {ok, Upgrades} -> mnesia:stop(), - ok = ensure_backup_taken(), + ensure_backup_taken(), ok = apply_upgrades(local, Upgrades, fun () -> ok end), - ok = ensure_backup_removed() + ensure_backup_removed(), + ok end. 
%% ------------------------------------------------------------------- -- cgit v1.2.1 From 2a4f51d39b3f291a7cd7e8e9f084cee8386a8712 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 12:11:01 +0000 Subject: sort out how often and when we do the rabbit:prepare (set up log handlers), and actually make that do the mnesia upgrade. --- src/rabbit.erl | 3 ++- src/rabbit_prelaunch.erl | 4 +--- src/rabbit_upgrade.erl | 10 ++++------ 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1361d0f4..c7d0d905 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -192,7 +192,8 @@ %%---------------------------------------------------------------------------- prepare() -> - ok = ensure_working_log_handlers(). + ok = ensure_working_log_handlers(), + ok = rabbit_upgrade:maybe_upgrade_mnesia(). start() -> try diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 92ad6a24..8800e8d6 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -235,10 +235,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. -process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> - [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> - [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; + [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry) -> [Entry]. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 87a22363..f2d38a93 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -101,7 +101,6 @@ ensure_backup_taken() -> end. take_backup() -> - rabbit:prepare(), %% Ensure we have logs for this BackupDir = backup_dir(), case rabbit_mnesia:copy_db(BackupDir) of ok -> info("upgrades: Mnesia dir backed up to ~p~n", @@ -134,12 +133,11 @@ maybe_upgrade_mnesia() -> {ok, []} -> ok; {ok, Upgrades} -> - rabbit:prepare(), %% Ensure we have logs for this ensure_backup_taken(), - case upgrade_mode(AllNodes) of - primary -> primary_upgrade(Upgrades, AllNodes); - secondary -> secondary_upgrade(AllNodes) - end + ok = case upgrade_mode(AllNodes) of + primary -> primary_upgrade(Upgrades, AllNodes); + secondary -> secondary_upgrade(AllNodes) + end end. upgrade_mode(AllNodes) -> -- cgit v1.2.1 From 02a4098c915add7c5f9b9002cf5ff0d6783e091d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 14:40:57 +0000 Subject: Detect discnodeishness prior to suffering disclessness --- src/rabbit_upgrade.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f2d38a93..85f6e88c 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -220,12 +220,14 @@ force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. secondary_upgrade(AllNodes) -> + %% must do this before we wipe out schema + IsDiscNode = is_disc_node(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), %% Note that we cluster with all nodes, rather than all disc nodes %% (as we can't know all disc nodes at this point). This is safe as %% we're not writing the cluster config, just setting up Mnesia. 
- ClusterNodes = case is_disc_node() of + ClusterNodes = case IsDiscNode of true -> AllNodes; false -> AllNodes -- [node()] end, -- cgit v1.2.1 From 2cb7c4257df8c2ae2407779a4e4ca8b09b6b9782 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Mar 2011 15:34:23 +0000 Subject: Switched to now_ms() --- src/rabbit_error_logger.erl | 3 ++- src/rabbit_misc.erl | 8 +------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 33dfcef9..5f53e430 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -70,6 +70,7 @@ publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, - timestamp = rabbit_misc:timestamp()}, + timestamp = + rabbit_misc:now_ms() div 1000}, list_to_binary(io_lib:format(Format, Data))), ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 713498c8..e79a58a1 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -52,7 +52,7 @@ unlink_and_capture_exit/1]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0, timestamp/0]). +-export([now_ms/0]). -export([lock_file/1]). -export([const_ok/1, const/1]). -export([ntoa/1, ntoab/1]). @@ -190,7 +190,6 @@ {bad_edge, [digraph:vertex()]}), digraph:vertex(), digraph:vertex()})). -spec(now_ms/0 :: () -> non_neg_integer()). --spec(timestamp/0 ::() -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). -spec(const_ok/1 :: (any()) -> 'ok'). -spec(const/1 :: (A) -> const(A)). @@ -200,7 +199,6 @@ -endif. --define(EPOCH, {{1970, 1, 1}, {0, 0, 0}}). %%---------------------------------------------------------------------------- method_record_type(Record) -> @@ -793,10 +791,6 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. -timestamp() -> - calendar:datetime_to_gregorian_seconds(erlang:universaltime()) - - calendar:datetime_to_gregorian_seconds(?EPOCH). - module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> -- cgit v1.2.1 From c63dcaa034093cd1dc217c06c102127d18ac524f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 16:05:04 +0000 Subject: Record all nodes, don't list them when we refuse to start. --- src/rabbit.erl | 4 ++-- src/rabbit_mnesia.erl | 21 +++++++++------------ src/rabbit_upgrade.erl | 15 ++++++--------- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1361d0f4..e60886fa 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -233,7 +233,7 @@ rotate_logs(BinarySuffix) -> start(normal, []) -> case erts_version_check() of ok -> - ok = rabbit_mnesia:delete_previously_running_disc_nodes(), + ok = rabbit_mnesia:delete_previously_running_nodes(), {ok, SupPid} = rabbit_sup:start_link(), true = register(rabbit, self()), @@ -246,7 +246,7 @@ start(normal, []) -> end. 
stop(_State) -> - ok = rabbit_mnesia:record_running_disc_nodes(), + ok = rabbit_mnesia:record_running_nodes(), terminated_ok = error_logger:delete_report_handler(rabbit_error_logger), ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47df1148..e661e5e3 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -22,8 +22,8 @@ is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, - record_running_disc_nodes/0, read_previously_running_disc_nodes/0, - delete_previously_running_disc_nodes/0, running_nodes_filename/0]). + record_running_nodes/0, read_previously_running_nodes/0, + delete_previously_running_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -61,9 +61,9 @@ -spec(wait_for_tables/1 :: ([atom()]) -> 'ok'). -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). --spec(record_running_disc_nodes/0 :: () -> 'ok'). --spec(read_previously_running_disc_nodes/0 :: () -> [node()]). --spec(delete_previously_running_disc_nodes/0 :: () -> 'ok'). +-spec(record_running_nodes/0 :: () -> 'ok'). +-spec(read_previously_running_nodes/0 :: () -> [node()]). +-spec(delete_previously_running_nodes/0 :: () -> 'ok'). -spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -380,18 +380,15 @@ delete_cluster_nodes_config() -> running_nodes_filename() -> filename:join(dir(), "nodes_running_at_shutdown"). -record_running_disc_nodes() -> +record_running_nodes() -> FileName = running_nodes_filename(), - Nodes = sets:to_list( - sets:intersection( - sets:from_list(nodes_of_type(disc_copies)), - sets:from_list(running_clustered_nodes()))) -- [node()], + Nodes = running_clustered_nodes() -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]), ok. -read_previously_running_disc_nodes() -> +read_previously_running_nodes() -> FileName = running_nodes_filename(), case rabbit_misc:read_term_file(FileName) of {ok, [Nodes]} -> Nodes; @@ -400,7 +397,7 @@ read_previously_running_disc_nodes() -> FileName, Reason}}) end. -delete_previously_running_disc_nodes() -> +delete_previously_running_nodes() -> FileName = running_nodes_filename(), case file:delete(FileName) of ok -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 6959208b..244be522 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -144,7 +144,7 @@ maybe_upgrade_mnesia() -> upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> - AfterUs = rabbit_mnesia:read_previously_running_disc_nodes(), + AfterUs = rabbit_mnesia:read_previously_running_nodes(), case {is_disc_node(), AfterUs} of {true, []} -> primary; @@ -152,14 +152,11 @@ upgrade_mode(AllNodes) -> Filename = rabbit_mnesia:running_nodes_filename(), die("Cluster upgrade needed but other disc nodes shut " "down after this one.~nPlease first start the last " - "disc node to shut down.~nThe disc nodes that were " - "still running when this one shut down are:~n~n" - " ~p~n~nNote: if several disc nodes were shut down " - "simultaneously they may all~nshow this message. " - "In which case, remove the lock file on one of them " - "and~nstart that node. 
The lock file on this node " - "is:~n~n ~s ", - [AfterUs, Filename]); + "disc node to shut down.~n~nNote: if several disc " + "nodes were shut down simultaneously they may " + "all~nshow this message. In which case, remove " + "the lock file on one of them and~nstart that node. " + "The lock file on this node is:~n~n ~s ", [Filename]); {false, _} -> die("Cluster upgrade needed but this is a ram node.~n" "Please first start the last disc node to shut down.", -- cgit v1.2.1 From 4bb07e06818a4986507685eda2dff36ab56687c5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 16:16:23 +0000 Subject: Explain --- src/rabbit_error_logger.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 5f53e430..4b13033e 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -70,6 +70,9 @@ publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, + %% NB: 0-9-1 says it's a "64 bit POSIX + %% timestamp". That's second + %% resolution, not millisecond. timestamp = rabbit_misc:now_ms() div 1000}, list_to_binary(io_lib:format(Format, Data))), -- cgit v1.2.1 From 77400eaae417d65c9a2556d9281a44a2d521342c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 23 Mar 2011 16:26:33 +0000 Subject: cosmetic --- src/rabbit_error_logger.erl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 4b13033e..3fb0817a 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -67,13 +67,12 @@ publish(_Other, _Format, _Data, _State) -> ok. publish1(RoutingKey, Format, Data, LogExch) -> + %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's + %% second resolution, not millisecond. + Timestamp = rabbit_misc:now_ms() div 1000, {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, - %% NB: 0-9-1 says it's a "64 bit POSIX - %% timestamp". That's second - %% resolution, not millisecond. - timestamp = - rabbit_misc:now_ms() div 1000}, + timestamp = Timestamp}, list_to_binary(io_lib:format(Format, Data))), ok. -- cgit v1.2.1 From 3b89e0573c46e82557dc2592514907e2a6d0ae71 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 16:50:28 +0000 Subject: ARGH! Trailing line --- src/rabbit_misc.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index e79a58a1..2e9563cf 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -871,4 +871,3 @@ is_process_alive(Pid) -> true -> true; _ -> false end. - -- cgit v1.2.1 From 21ac2b8a105560ab59b62c42d9ce6ad05ea9f34d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 17:08:19 +0000 Subject: Abstract out continuation --- src/rabbit_mnesia.erl | 29 ++++++++++++++--------------- src/rabbit_upgrade.erl | 2 +- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9ca52327..8bc89880 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -45,7 +45,7 @@ -spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). --spec(init_db/3 :: ([node()], boolean(), boolean()) -> 'ok'). +-spec(init_db/3 :: ([node()], boolean(), rabbit_misc:thunk('ok')) -> 'ok'). 
-spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). @@ -90,7 +90,8 @@ status() -> init() -> ensure_mnesia_running(), ensure_mnesia_dir(), - ok = init_db(read_cluster_nodes_config(), true, true), + ok = init_db(read_cluster_nodes_config(), true, + fun maybe_upgrade_local_or_record_desired/0), ok. is_db_empty() -> @@ -112,7 +113,7 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force, true), + ok = init_db(ClusterNodes, Force, fun () -> ok end), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() @@ -410,7 +411,7 @@ delete_previously_running_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. -init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> +init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -449,17 +450,7 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> true -> disc; false -> ram end), - case DoSecondaryLocalUpgrades of - true -> case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new - %% node we won't have a version - version_not_available -> - ok = rabbit_version:record_desired() - end; - false -> ok - end, + ok = SecondaryPostMnesiaFun(), ensure_schema_integrity(), ok end; @@ -470,6 +461,14 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. +maybe_upgrade_local_or_record_desired() -> + case rabbit_upgrade:maybe_upgrade_local() of + ok -> ok; + %% If we're just starting up a new node we won't have a + %% version + version_not_available -> ok = rabbit_version:record_desired() + end. + schema_ok_or_move() -> case check_schema_integrity() of ok -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 3981b173..5ec08330 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -229,7 +229,7 @@ secondary_upgrade(AllNodes) -> false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = rabbit_mnesia:init_db(ClusterNodes, true, false), + ok = rabbit_mnesia:init_db(ClusterNodes, true, fun () -> ok end), ok = rabbit_version:record_desired_for_scope(mnesia), ok. 
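With this change the boolean flag becomes a continuation: init_db/3 takes a nullary fun (a rabbit_misc:thunk('ok') per the spec above) and simply invokes it once the node has joined the Mnesia cluster, so each caller chooses what, if anything, happens at that point. A sketch of the shape (run_after_mnesia/1 is illustrative, not part of the patch):

    run_after_mnesia(PostMnesiaFun) when is_function(PostMnesiaFun, 0) ->
        %% ... joining the cluster / waiting for tables would happen here ...
        ok = PostMnesiaFun(),
        ok.

    %% callers pick the behaviour at the call site, e.g.
    %%   run_after_mnesia(fun () -> ok end)
    %%   run_after_mnesia(fun maybe_upgrade_local_or_record_desired/0)
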
-- cgit v1.2.1 From 330eb98c7bc0e3df4149807dba765263a06c2d3d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 17:23:12 +0000 Subject: Turns out it's very important that we do write the schema_version when call mnesia:cluster --- src/rabbit_mnesia.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8bc89880..fbcf07ae 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -113,7 +113,8 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force, fun () -> ok end), + ok = init_db(ClusterNodes, Force, + fun maybe_upgrade_local_or_record_desired/0), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() -- cgit v1.2.1 From 5d51177b297d5425741b808fb6f78a2712a0376e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 18:06:16 +0000 Subject: cough --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 5ec08330..a2abb1e5 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -64,11 +64,11 @@ %% into the boot process by prelaunch before the mnesia application is %% started. By the time Mnesia is started the upgrades have happened %% (on the primary), or Mnesia has been reset (on the secondary) and -%% rabbit_mnesia:init_db/2 can then make the node rejoin the cluster +%% rabbit_mnesia:init_db/3 can then make the node rejoin the cluster %% in the normal way. %% %% The non-mnesia upgrades are then triggered by -%% rabbit_mnesia:init_db/2. Of course, it's possible for a given +%% rabbit_mnesia:init_db/3. Of course, it's possible for a given %% upgrade process to only require Mnesia upgrades, or only require %% non-Mnesia upgrades. In the latter case no Mnesia resets and %% reclusterings occur. -- cgit v1.2.1 From 129628e9f1c9a9d8dc0662de2cc7c50459d622d3 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 23 Mar 2011 20:12:03 +0000 Subject: removing trap_exit flag in rabbit_channel --- src/rabbit_channel.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c12614c..5099bf3f 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -156,7 +156,6 @@ ready_for_close(Pid) -> init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> - process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), State = #ch{state = starting, -- cgit v1.2.1 From 5de9f9dc0af669df764db3a3915fd810918f232c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Mar 2011 17:33:07 +0000 Subject: Correct test for existance of config file --- packaging/common/rabbitmq-server.ocf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index 94999d0e..d58c48ed 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -103,9 +103,9 @@ The IP Port for rabbitmq-server to listen on -Location of the config file +Location of the config file (without the .config suffix) -Config file path +Config file path (without the .config suffix) @@ -189,8 +189,8 @@ rabbit_validate_partial() { } rabbit_validate_full() { - if [ ! -z $RABBITMQ_CONFIG_FILE ] && [ ! 
-e $RABBITMQ_CONFIG_FILE ]; then - ocf_log err "rabbitmq-server config_file $RABBITMQ_CONFIG_FILE does not exist or is not a file"; + if [ ! -z $RABBITMQ_CONFIG_FILE ] && [ ! -e "${RABBITMQ_CONFIG_FILE}.config" ]; then + ocf_log err "rabbitmq-server config_file ${RABBITMQ_CONFIG_FILE}.config does not exist or is not a file"; exit $OCF_ERR_INSTALLED; fi -- cgit v1.2.1 From b5676465bfed9d4dda43b50043c2d67b5b595e2e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:10:20 +0100 Subject: more sensible order of exchange exports --- src/rabbit_exchange.erl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a463e570..b5d38b75 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -18,12 +18,13 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, - info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). --export([callback/3]). +-export([recover/0, callback/3, declare/6, + assert_equivalence/6, assert_args_equivalence/2, check_type/1, + lookup/1, lookup_or_die/1, list/1, + info_keys/0, info/1, info/2, info_all/1, info_all/2, + publish/2, delete/2]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). --export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). %%---------------------------------------------------------------------------- @@ -33,8 +34,10 @@ -type(name() :: rabbit_types:r('exchange')). -type(type() :: atom()). +-type(fun_name() :: atom()). -spec(recover/0 :: () -> 'ok'). +-spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), rabbit_framing:amqp_table()) @@ -72,7 +75,6 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). -endif. @@ -101,6 +103,9 @@ recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> recover_with_bindings([], [], []) -> ok. +callback(#exchange{type = XType}, Fun, Args) -> + apply(type_to_module(XType), Fun, Args). + declare(XName, Type, Durable, AutoDelete, Internal, Args) -> X = #exchange{name = XName, type = Type, @@ -294,9 +299,6 @@ maybe_auto_delete(#exchange{auto_delete = true} = X) -> {deleted, X, [], Deletions} -> {deleted, Deletions} end. -callback(#exchange{type = XType}, Fun, Args) -> - apply(type_to_module(XType), Fun, Args). 
- conditional_delete(X = #exchange{name = XName}) -> case rabbit_binding:has_for_source(XName) of false -> unconditional_delete(X); -- cgit v1.2.1 From 6465f6639b0e73f4080317dec82fac1e7397e090 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:13:36 +0100 Subject: tweak: only invoke rabbit_exchange:callback when absolutely necessary --- src/rabbit_exchange.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b5d38b75..cab6510b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -131,7 +131,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - callback(Exchange, create, [Tx, Exchange]), + ok = (type_to_module(Type)):create(Tx, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> -- cgit v1.2.1 From a438017121d00695475817ce3f8fef1a525d4e26 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:25:43 +0100 Subject: cosmetic --- src/rabbit_exchange.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index cab6510b..9d9b07af 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -140,11 +140,6 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). -%% Used with atoms from records; e.g., the type is expected to exist. -type_to_module(T) -> - {ok, Module} = rabbit_registry:lookup_module(exchange, T), - Module. - %% Used with binaries sent over the wire; the type may not exist. check_type(TypeBin) -> case rabbit_registry:binary_to_type(TypeBin) of @@ -310,3 +305,8 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. + +%% Used with atoms from records; e.g., the type is expected to exist. +type_to_module(T) -> + {ok, Module} = rabbit_registry:lookup_module(exchange, T), + Module. -- cgit v1.2.1 From 5baea669ec65f80dae2064efa02d039956034575 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 29 Mar 2011 11:25:21 +0100 Subject: Improve documentation of BQ concerning the issuance of confirms --- src/rabbit_backing_queue.erl | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index a15ff846..fe09e400 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,13 +39,12 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which accepts a function from - %% state to state and invokes it with the current backing - %% queue state. This is useful for handling events, e.g. when - %% the backing queue does not have its own process to receive - %% such events, or when the processing of an event results in - %% a state transition the queue logic needs to know about - %% (such as messages getting confirmed). + %% 4. an asynchronous callback which accepts a function of type + %% backing-queue-state to backing-queue-state. 
This callback + %% function can be safely invoked from any process, which + %% makes it useful for passing messages back into the backing + %% queue, especially as the backing queue does not have + %% control of its own mailbox. %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, @@ -71,6 +70,31 @@ behaviour_info(callbacks) -> %% Return ids of messages which have been confirmed since %% the last invocation of this function (or initialisation). + %% + %% Message ids should only appear in the result of + %% drain_confirmed under the following circumstances: + %% + %% 1. The message appears in a call to publish_delivered/4 and + %% the first argument (ack_required) is false; or + %% 2. The message is fetched from the queue with fetch/2 and the + %% first argument (ack_required) is false; or + %% 3. The message is acked (ack/2 is called for the message); or + %% 4. The message is fully fsync'd to disk in such a way that the + %% recovery of the message is guaranteed in the event of a + %% crash of this rabbit node (excluding hardware failure). + %% + %% In addition to the above conditions, a message id may only + %% appear in the result of drain_confirmed if + %% #message_properties.needs_confirming = true when the msg was + %% published (through whichever means) to the backing queue. + %% + %% It is legal for the same message id to appear in the results + %% of multiple calls to drain_confirmed, which means that the + %% backing queue is not required to keep track of the which + %% messages it has already confirmed. The confirm will be issued + %% to the publisher the first time the message id appears in the + %% result of drain_confirmed. All subsequent appearances of that + %% message id will be ignored. {drain_confirmed, 1}, %% Drop messages from the head of the queue while the supplied -- cgit v1.2.1 From 95ca5fddb6e4ff306580f2be8945353e52791282 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 29 Mar 2011 11:27:29 +0100 Subject: english --- src/rabbit_backing_queue.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index fe09e400..0ca8d260 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -90,11 +90,11 @@ behaviour_info(callbacks) -> %% %% It is legal for the same message id to appear in the results %% of multiple calls to drain_confirmed, which means that the - %% backing queue is not required to keep track of the which - %% messages it has already confirmed. The confirm will be issued - %% to the publisher the first time the message id appears in the - %% result of drain_confirmed. All subsequent appearances of that - %% message id will be ignored. + %% backing queue is not required to keep track of which messages + %% it has already confirmed. The confirm will be issued to the + %% publisher the first time the message id appears in the result + %% of drain_confirmed. All subsequent appearances of that message + %% id will be ignored. 
{drain_confirmed, 1}, %% Drop messages from the head of the queue while the supplied -- cgit v1.2.1 From 254a2dadf6814b782b7debfd1e3fe95f8e17739f Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 29 Mar 2011 16:03:36 +0100 Subject: cosmetic --- src/rabbit_control.erl | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8364ecd8..571eb5e4 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -382,12 +382,9 @@ rpc_call(Node, Mod, Fun, Args) -> %% characters. We don't escape characters above 127, since they may %% form part of UTF-8 strings. -escape(Atom) when is_atom(Atom) -> - escape(atom_to_list(Atom)); -escape(Bin) when is_binary(Bin) -> - escape(binary_to_list(Bin)); -escape(L) when is_list(L) -> - escape_char(lists:reverse(L), []). +escape(Atom) when is_atom(Atom) -> escape(atom_to_list(Atom)); +escape(Bin) when is_binary(Bin) -> escape(binary_to_list(Bin)); +escape(L) when is_list(L) -> escape_char(lists:reverse(L), []). escape_char([$\\ | T], Acc) -> escape_char(T, [$\\, $\\ | Acc]); @@ -402,19 +399,15 @@ escape_char([], Acc) -> prettify_amqp_table(Table) -> [{escape(K), prettify_typed_amqp_value(T, V)} || {K, T, V} <- Table]. -prettify_typed_amqp_value(Type, Value) -> - case Type of - longstr -> escape(Value); - table -> prettify_amqp_table(Value); - array -> [prettify_typed_amqp_value(T, V) || {T, V} <- Value]; - _ -> Value - end. +prettify_typed_amqp_value(longstr, Value) -> escape(Value); +prettify_typed_amqp_value(table, Value) -> prettify_amqp_table(Value); +prettify_typed_amqp_value(array, Value) -> [prettify_typed_amqp_value(T, V) || + {T, V} <- Value]; +prettify_typed_amqp_value(_Type, Value) -> Value. %% the slower shutdown on windows required to flush stdout quit(Status) -> case os:type() of - {unix, _} -> - halt(Status); - {win32, _} -> - init:stop(Status) + {unix, _} -> halt(Status); + {win32, _} -> init:stop(Status) end. -- cgit v1.2.1 From 8196a4f395d2a95d6796fc1d6ce3dbbb529c8be3 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:08:50 +0100 Subject: Slightly better explanation for some epmd errors. --- src/rabbit_prelaunch.erl | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8800e8d6..0b058f76 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -16,7 +16,7 @@ -module(rabbit_prelaunch). --export([start/0, stop/0]). +-export([start/0, stop/0, duplicate_node_check/1]). -define(BaseApps, [rabbit]). -define(ERROR_CODE, 1). @@ -258,8 +258,19 @@ duplicate_node_check(NodeStr) -> terminate(?ERROR_CODE); false -> ok end; - {error, EpmdReason} -> terminate("unexpected epmd error: ~p~n", - [EpmdReason]) + {error, EpmdReason} -> + Tip = case EpmdReason of + address -> + io_lib:format("(Unable to connect to epmd on host " ++ + "~p using tcp port 4369.)", + [NodeHost]); + nxdomain -> + io_lib:format("(Can't resolve host ~p.)", + [NodeHost]); + _ -> [] + end, + terminate("unexpected epmd error: ~p ~s~n", + [EpmdReason, Tip]) end. terminate(Fmt, Args) -> -- cgit v1.2.1 From b88e25d8306d395e6943aa1a5bc3cda62d189318 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:31:46 +0100 Subject: Matthias doesn't like mentioning the port number. 
--- src/rabbit_prelaunch.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 0b058f76..d8cb2918 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -261,8 +261,8 @@ duplicate_node_check(NodeStr) -> {error, EpmdReason} -> Tip = case EpmdReason of address -> - io_lib:format("(Unable to connect to epmd on host " ++ - "~p using tcp port 4369.)", + io_lib:format("(Unable to connect to epmd on " ++ + "host ~p.)", [NodeHost]); nxdomain -> io_lib:format("(Can't resolve host ~p.)", -- cgit v1.2.1 From 6f45978568372304acee4e138cbac2da59739970 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:36:55 +0100 Subject: cosmetic --- src/rabbit_prelaunch.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index d8cb2918..078ac338 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -261,9 +261,8 @@ duplicate_node_check(NodeStr) -> {error, EpmdReason} -> Tip = case EpmdReason of address -> - io_lib:format("(Unable to connect to epmd on " ++ - "host ~p.)", - [NodeHost]); + io_lib:format("(Unable to connect to epmd on " + "host ~p.)", [NodeHost]); nxdomain -> io_lib:format("(Can't resolve host ~p.)", [NodeHost]); -- cgit v1.2.1 From 8678272e1759f1fe7467582acf58830f1e28a549 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:52:23 +0100 Subject: Finishing touches. --- src/rabbit_prelaunch.erl | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 078ac338..c8ad7c9c 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -259,17 +259,12 @@ duplicate_node_check(NodeStr) -> false -> ok end; {error, EpmdReason} -> - Tip = case EpmdReason of - address -> - io_lib:format("(Unable to connect to epmd on " - "host ~p.)", [NodeHost]); - nxdomain -> - io_lib:format("(Can't resolve host ~p.)", - [NodeHost]); - _ -> [] - end, - terminate("unexpected epmd error: ~p ~s~n", - [EpmdReason, Tip]) + terminate("epmd error for host ~p: ~p (~s)~n", + [NodeHost, EpmdReason, + case EpmdReason of + address -> "unable to establish tcp connection"; + _ -> inet:format_error(EpmdReason) + end]) end. 
terminate(Fmt, Args) -> -- cgit v1.2.1 From 8fd1963f80794083d1e898436c137fa5cf9c21c0 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 29 Mar 2011 20:58:52 +0100 Subject: remove R13isms --- src/gm.erl | 3 ++- src/gm_soak_test.erl | 7 ++++--- src/rabbit_amqqueue_process.erl | 14 +++++++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 5b3623cf..1edcde11 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -516,7 +516,8 @@ flush(Server) -> init([GroupName, Module, Args]) -> - random:seed(now()), + {MegaSecs, Secs, MicroSecs} = now(), + random:seed(MegaSecs, Secs, MicroSecs), gen_server2:cast(self(), join), Self = self(), {ok, #state { self = Self, diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl index 1f8832a6..dae42ac7 100644 --- a/src/gm_soak_test.erl +++ b/src/gm_soak_test.erl @@ -35,7 +35,7 @@ with_state(Fun) -> inc() -> case 1 + get(count) of - 100000 -> Now = os:timestamp(), + 100000 -> Now = now(), Start = put(ts, Now), Diff = timer:now_diff(Now, Start), Rate = 100000 / (Diff / 1000000), @@ -48,7 +48,7 @@ joined([], Members) -> io:format("Joined ~p (~p members)~n", [self(), length(Members)]), put(state, dict:from_list([{Member, empty} || Member <- Members])), put(count, 0), - put(ts, os:timestamp()), + put(ts, now()), ok. members_changed([], Births, Deaths) -> @@ -101,7 +101,8 @@ terminate([], Reason) -> spawn_member() -> spawn_link( fun () -> - random:seed(now()), + {MegaSecs, Secs, MicroSecs} = now(), + random:seed(MegaSecs, Secs, MicroSecs), %% start up delay of no more than 10 seconds timer:sleep(random:uniform(10000)), {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 3f5758ce..2b0fe17e 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -428,11 +428,19 @@ confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> {CMs, MTC0} end end, {gb_trees:empty(), MTC}, MsgIds), - gb_trees:map(fun(ChPid, MsgSeqNos) -> - rabbit_channel:confirm(ChPid, MsgSeqNos) - end, CMs), + gb_trees_foreach(fun(ChPid, MsgSeqNos) -> + rabbit_channel:confirm(ChPid, MsgSeqNos) + end, CMs), State#q{msg_id_to_channel = MTC1}. +gb_trees_foreach(_, none) -> + ok; +gb_trees_foreach(Fun, {Key, Val, It}) -> + Fun(Key, Val), + gb_trees_foreach(Fun, gb_trees:next(It)); +gb_trees_foreach(Fun, Tree) -> + gb_trees_foreach(Fun, gb_trees:next(gb_trees:iterator(Tree))). + gb_trees_cons(Key, Value, Tree) -> case gb_trees:lookup(Key, Tree) of {value, Values} -> gb_trees:update(Key, [Value | Values], Tree); -- cgit v1.2.1 From 81e65665ede1096e482215d4c9c516f6a8e81c2d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:20:43 +0100 Subject: test refactor: mock the writer only once --- src/rabbit_tests.erl | 44 +++++++++++--------------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ca046c91..be868215 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1178,9 +1178,15 @@ test_server_status() -> passed. -test_spawn(Receiver) -> +test_writer(Pid) -> + receive + shutdown -> ok; + {send_command, Method} -> Pid ! Method, test_writer(Pid) + end. 
+ +test_spawn() -> Me = self(), - Writer = spawn(fun () -> Receiver(Me) end), + Writer = spawn(fun () -> test_writer(Me) end), {ok, Ch} = rabbit_channel:start_link( 1, Me, Writer, Me, rabbit_framing_amqp_0_9_1, user(<<"guest">>), <<"/">>, [], self(), @@ -1198,15 +1204,6 @@ user(Username) -> impl = #internal_user{username = Username, is_admin = true}}. -test_statistics_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_statistics_receiver(Pid) - end. - test_statistics_event_receiver(Pid) -> receive Foo -> @@ -1228,17 +1225,8 @@ test_statistics_receive_event1(Ch, Matcher) -> after 1000 -> throw(failed_to_receive_event) end. -test_confirms_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_confirms_receiver(Pid) - end. - test_confirms() -> - {_Writer, Ch} = test_spawn(fun test_confirms_receiver/1), + {_Writer, Ch} = test_spawn(), DeclareBindDurableQueue = fun() -> rabbit_channel:do(Ch, #'queue.declare'{durable = true}), @@ -1311,7 +1299,7 @@ test_statistics() -> %% by far the most complex code though. %% Set up a channel and queue - {_Writer, Ch} = test_spawn(fun test_statistics_receiver/1), + {_Writer, Ch} = test_spawn(), rabbit_channel:do(Ch, #'queue.declare'{}), QName = receive #'queue.declare_ok'{queue = Q0} -> Q0 @@ -1462,18 +1450,8 @@ test_delegates_sync(SecondaryNode) -> passed. -test_queue_cleanup_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_queue_cleanup_receiver(Pid) - end. - - test_queue_cleanup(_SecondaryNode) -> - {_Writer, Ch} = test_spawn(fun test_queue_cleanup_receiver/1), + {_Writer, Ch} = test_spawn(), rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }), receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} -> ok -- cgit v1.2.1 From 88f1f2f23d4cc3cbd4612f72d1cd40830aa2c8b6 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:21:21 +0100 Subject: cosmetic --- src/rabbit_tests.erl | 130 ++++++++++++++++++++++++--------------------------- 1 file changed, 60 insertions(+), 70 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index be868215..ea7d1343 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -598,39 +598,37 @@ test_topic_matching() -> exchange_op_callback(X, create, []), %% add some bindings - Bindings = lists:map( - fun ({Key, Q}) -> - #binding{source = XName, - key = list_to_binary(Key), - destination = #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)}} - end, [{"a.b.c", "t1"}, - {"a.*.c", "t2"}, - {"a.#.b", "t3"}, - {"a.b.b.c", "t4"}, - {"#", "t5"}, - {"#.#", "t6"}, - {"#.b", "t7"}, - {"*.*", "t8"}, - {"a.*", "t9"}, - {"*.b.c", "t10"}, - {"a.#", "t11"}, - {"a.#.#", "t12"}, - {"b.b.c", "t13"}, - {"a.b.b", "t14"}, - {"a.b", "t15"}, - {"b.c", "t16"}, - {"", "t17"}, - {"*.*.*", "t18"}, - {"vodka.martini", "t19"}, - {"a.b.c", "t20"}, - {"*.#", "t21"}, - {"#.*.#", "t22"}, - {"*.#.#", "t23"}, - {"#.#.#", "t24"}, - {"*", "t25"}, - {"#.b.#", "t26"}]), + Bindings = [#binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} || + {Key, Q} <- [{"a.b.c", "t1"}, + {"a.*.c", "t2"}, + {"a.#.b", "t3"}, + {"a.b.b.c", "t4"}, + {"#", "t5"}, + {"#.#", "t6"}, + {"#.b", "t7"}, + {"*.*", "t8"}, + {"a.*", "t9"}, + {"*.b.c", "t10"}, + {"a.#", "t11"}, + {"a.#.#", "t12"}, + {"b.b.c", "t13"}, + {"a.b.b", "t14"}, + {"a.b", "t15"}, + {"b.c", "t16"}, + {"", 
"t17"}, + {"*.*.*", "t18"}, + {"vodka.martini", "t19"}, + {"a.b.c", "t20"}, + {"*.#", "t21"}, + {"#.*.#", "t22"}, + {"*.#.#", "t23"}, + {"#.#.#", "t24"}, + {"*", "t25"}, + {"#.b.#", "t26"}]], lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), @@ -669,22 +667,23 @@ test_topic_matching() -> ordsets:from_list(RemovedBindings))), %% test some matches - test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", - "t23", "t24", "t26"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", - "t22", "t23", "t24", "t26"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", - "t23", "t24", "t26"]}, - {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, - {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", - "t24", "t26"]}, - {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, - {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), + test_topic_expect_match( + X, + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange exchange_op_callback(X, delete, [RemainingBindings]), @@ -1206,9 +1205,7 @@ user(Username) -> test_statistics_event_receiver(Pid) -> receive - Foo -> - Pid ! Foo, - test_statistics_event_receiver(Pid) + Foo -> Pid ! Foo, test_statistics_event_receiver(Pid) end. test_statistics_receive_event(Ch, Matcher) -> @@ -1252,10 +1249,9 @@ test_confirms() -> QPid1 = Q1#amqqueue.pid, %% Enable confirms rabbit_channel:do(Ch, #'confirm.select'{}), - receive #'confirm.select_ok'{} -> - ok - after 1000 -> - throw(failed_to_enable_confirms) + receive + #'confirm.select_ok'{} -> ok + after 1000 -> throw(failed_to_enable_confirms) end, %% Publish a message rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, @@ -1267,25 +1263,19 @@ test_confirms() -> QPid1 ! 
boom, %% Wait for a nack receive - #'basic.nack'{} -> - ok; - #'basic.ack'{} -> - throw(received_ack_instead_of_nack) - after 2000 -> - throw(did_not_receive_nack) + #'basic.nack'{} -> ok; + #'basic.ack'{} -> throw(received_ack_instead_of_nack) + after 2000 -> throw(did_not_receive_nack) end, receive - #'basic.ack'{} -> - throw(received_ack_when_none_expected) - after 1000 -> - ok + #'basic.ack'{} -> throw(received_ack_when_none_expected) + after 1000 -> ok end, %% Cleanup rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), - receive #'queue.delete_ok'{} -> - ok - after 1000 -> - throw(failed_to_cleanup_queue) + receive + #'queue.delete_ok'{} -> ok + after 1000 -> throw(failed_to_cleanup_queue) end, unlink(Ch), ok = rabbit_channel:shutdown(Ch), -- cgit v1.2.1 From d8f2e891ce8c4a42184e084b582724546c379495 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 30 Mar 2011 11:47:30 +0100 Subject: adding reporting exception to supervisor2 --- src/supervisor2.erl | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 1a240856..2c0874ab 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,6 +38,10 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% +%% 5) Added an exception to reporting: If a child has MaxR = 0 and it +%% terminates with reason {shutdown, _}, then supervisor2 behaves +%% as supervisor *except* it does not report anything to error_logger. +%% %% All modifications are (C) 2010-2011 VMware, Inc. %% %% %CopyrightBegin% @@ -542,8 +546,7 @@ do_restart({RestartType, Delay}, Reason, Child, State) -> {ok, state_del_child(Child, NState)} end; do_restart(permanent, Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State); + maybe_report_and_restart(Reason, Child, State); do_restart(intrinsic, normal, Child, State) -> {shutdown, state_del_child(Child, State)}; do_restart(intrinsic, shutdown, Child = #child{child_type = supervisor}, @@ -557,13 +560,24 @@ do_restart(_, shutdown, Child, State) -> {ok, NState}; do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State); + maybe_report_and_restart(Reason, Child, State); do_restart(temporary, Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), + maybe_report(Reason, Child, State), NState = state_del_child(Child, State), {ok, NState}. +maybe_report_and_restart({shutdown, _}, Child, State = #state{intensity = 0}) -> + {terminate, NState} = add_restart(State), + {shutdown, state_del_child(Child, NState)}; +maybe_report_and_restart(Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State). + +maybe_report({shutdown, _}, _Child, #state{intensity = 0}) -> + ok; +maybe_report(Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name). 
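%% Hedged sketch (module and child names invented) of the exception this
%% commit adds: a supervisor2 with MaxR = 0 whose child terminates with
%% {shutdown, _} shuts down without writing anything to error_logger.
-module(quiet_sup).
-behaviour(supervisor2).
-export([start_link/0, init/1]).

start_link() -> supervisor2:start_link(?MODULE, []).

init([]) ->
    {ok, {{one_for_one, 0, 1},  %% MaxR = 0, so the reporting exception applies
          [{quiet_child, {quiet_child, start_link, []},
            permanent, 5000, worker, [quiet_child]}]}}.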
+ restart(Child, State) -> case add_restart(State) of {ok, NState} -> -- cgit v1.2.1 From 92e9e3748a98d2f76f22cabae854772c168c5637 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 30 Mar 2011 12:36:38 +0100 Subject: Shutup, dialyzer --- src/rabbit_tests.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ea7d1343..fb1c9a34 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -700,9 +700,14 @@ test_topic_expect_match(X, List) -> lists:foreach( fun ({Key, Expected}) -> BinKey = list_to_binary(Key), + Message = rabbit_basic:message(X#exchange.name, BinKey, + #'P_basic'{}, <<>>), Res = rabbit_exchange_type_topic:route( - X, #delivery{message = #basic_message{routing_keys = - [BinKey]}}), + X, #delivery{mandatory = false, + immediate = false, + txn = none, + sender = self(), + message = Message}), ExpectedRes = lists:map( fun (Q) -> #resource{virtual_host = <<"/">>, kind = queue, -- cgit v1.2.1 From 1312b972cf32478b68223af795a40c979e65b4d3 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 12:45:54 +0100 Subject: cosmetic: put 'rabbitmqctl wait' code in the right place --- src/rabbit_control.erl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 571eb5e4..4a2858f0 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -127,6 +127,8 @@ usage() -> io:format("~s", [rabbit_ctl_usage:usage()]), quit(1). +%%---------------------------------------------------------------------------- + action(stop, Node, [], _Opts, Inform) -> Inform("Stopping and halting node ~p", [Node]), call(Node, {rabbit, stop_and_halt, []}); @@ -159,6 +161,10 @@ action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]); +action(wait, Node, [], _Opts, Inform) -> + Inform("Waiting for ~p", [Node]), + wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). + action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), case call(Node, {rabbit, status, []}) of @@ -294,9 +300,7 @@ action(list_permissions, Node, [], Opts, Inform) -> display_list(call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]})); -action(wait, Node, [], _Opts, Inform) -> - Inform("Waiting for ~p", [Node]), - wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). +%%---------------------------------------------------------------------------- wait_for_application(Node, Attempts) -> case rpc_call(Node, application, which_applications, [infinity]) of -- cgit v1.2.1 From a4c348672a43acd05303a38af8c08196924fb650 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:38:07 +0100 Subject: Make rabbitmq-server compilable. --- src/rabbit_control.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 4a2858f0..6fb465b5 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -163,7 +163,7 @@ action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> action(wait, Node, [], _Opts, Inform) -> Inform("Waiting for ~p", [Node]), - wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). 
+ wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS); action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), @@ -298,7 +298,7 @@ action(list_permissions, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), display_list(call(Node, {rabbit_auth_backend_internal, - list_vhost_permissions, [VHost]})); + list_vhost_permissions, [VHost]})). %%---------------------------------------------------------------------------- -- cgit v1.2.1 From f5638b24b218c063a93a055b426f079dda5c8c88 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:50:36 +0100 Subject: Mark network connections as network. --- src/rabbit_reader.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 609bb43f..ff0e9269 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -681,7 +681,7 @@ handle_method0(#'connection.open'{virtual_host = VHostPath}, State#v1{connection_state = running, connection = NewConnection}), rabbit_event:notify(connection_created, - infos(?CREATION_EVENT_KEYS, State1)), + [{type, network}|infos(?CREATION_EVENT_KEYS, State1)]), rabbit_event:if_enabled(StatsTimer, fun() -> internal_emit_stats(State1) end), State1; -- cgit v1.2.1 From d4b7b576f88362839631499a19b6f9695ac03cdf Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:55:33 +0100 Subject: Add space --- src/rabbit_reader.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index ff0e9269..42af91a8 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -681,7 +681,8 @@ handle_method0(#'connection.open'{virtual_host = VHostPath}, State#v1{connection_state = running, connection = NewConnection}), rabbit_event:notify(connection_created, - [{type, network}|infos(?CREATION_EVENT_KEYS, State1)]), + [{type, network} | + infos(?CREATION_EVENT_KEYS, State1)]), rabbit_event:if_enabled(StatsTimer, fun() -> internal_emit_stats(State1) end), State1; -- cgit v1.2.1 From 26b7b740905b3c15559c81cc0e945890ae8ec4d4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 14:06:52 +0100 Subject: Make sure Attempts does not go negative. --- src/rabbit_control.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 6fb465b5..1af91f4c 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -304,10 +304,9 @@ action(list_permissions, Node, [], Opts, Inform) -> wait_for_application(Node, Attempts) -> case rpc_call(Node, application, which_applications, [infinity]) of - {badrpc, _} = E -> NewAttempts = Attempts - 1, - case NewAttempts of + {badrpc, _} = E -> case Attempts of 0 -> E; - _ -> wait_for_application0(Node, NewAttempts) + _ -> wait_for_application0(Node, Attempts - 1) end; Apps -> case proplists:is_defined(rabbit, Apps) of %% We've seen the node up; if it goes down -- cgit v1.2.1 From 6019f2da620f17174e677b175ca938f335d8390e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 15:31:45 +0100 Subject: Only recover exchanges and bindings that need to be recovered. 
--- src/rabbit_binding.erl | 11 +++++++---- src/rabbit_exchange.erl | 9 ++++++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 6167790e..359d4287 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -96,10 +96,13 @@ recover() -> rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - {_, ReverseRoute} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, ReverseRoute, write), - [B | Acc] + case mnesia:read({rabbit_route, B}) of + [] -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, write), + [B | Acc]; + [_] -> Acc + end end, [], rabbit_durable_route). exists(Binding) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 9d9b07af..0d13a684 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -84,9 +84,12 @@ recover() -> Xs = rabbit_misc:table_fold( - fun (X, Acc) -> - ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc] + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc + end end, [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), recover_with_bindings( -- cgit v1.2.1 From ba51aa80666fedded2c71ee57fe233906fa795a0 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 31 Mar 2011 12:07:17 +0100 Subject: Don't transform markers when upgrading messages --- src/rabbit_msg_store.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index bb26de64..9b8ddae0 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2007,7 +2007,10 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), fun({MsgId, _Size, _Offset, BinMsg}, ok) -> - {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, MsgNew} = case binary_to_term(BinMsg) of + <<>> -> {ok, <<>>}; %% dying client marker + Msg -> TransformFun(Msg) + end, {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), -- cgit v1.2.1 From 992e24b44109679d60ec7dc808548b9d57efffd4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 12:43:08 +0100 Subject: Change exchange type API to not distinguish between creating and recovering, and to allow recovering bindings. Recover bindings when needed. --- include/rabbit_exchange_type_spec.hrl | 9 ++++---- src/rabbit_binding.erl | 3 ++- src/rabbit_exchange.erl | 42 ++++++++++++++++++++++++----------- src/rabbit_exchange_type.erl | 13 +++++------ src/rabbit_exchange_type_direct.erl | 9 ++++---- src/rabbit_exchange_type_fanout.erl | 7 +++--- src/rabbit_exchange_type_headers.erl | 7 +++--- src/rabbit_exchange_type_topic.erl | 18 +++++++++------ src/rabbit_tests.erl | 2 +- 9 files changed, 62 insertions(+), 48 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 45c475d8..8163b6f2 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -20,13 +20,12 @@ -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). 
--spec(recover/2 :: (rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(start/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), - rabbit_types:binding()) -> 'ok'). +-spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). -spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(assert_args_equivalence/2 :: diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 359d4287..84ae789c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -129,7 +129,8 @@ add(Binding, InnerFun) -> fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), + Src, add_bindings, + [Tx, Src, [B]]), rabbit_event:notify_if( not Tx, binding_created, info(B)) end; diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 0d13a684..f6ab9d74 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -92,18 +92,34 @@ recover() -> end end, [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), - recover_with_bindings( - lists:keysort(#binding.source, Bs), - lists:keysort(#exchange.name, Xs), []). - -recover_with_bindings([B = #binding{source = XName} | Rest], - Xs = [#exchange{name = XName} | _], - Bindings) -> - recover_with_bindings(Rest, Xs, [B | Bindings]); -recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> - (type_to_module(Type)):recover(X, Bindings), - recover_with_bindings(Bs, Xs, []); -recover_with_bindings([], [], []) -> + {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), + ok = recovery_callbacks(RecXBs, NoRecXBs). + +%% TODO strip out bindings that are to queues not on this node +filter_recovered_exchanges(Xs, Bs) -> + RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + lists:foldl( + fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> + case dict:find(Src, RecXs) of + {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; + error -> {ok, X} = lookup(Src), + {RecXBs, dict:append(X, B, NoRecXBs)} + end + end, {dict:new(), dict:new()}, Bs). + +recovery_callbacks(RecXBs, NoRecXBs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (X = #exchange{type = Type}, Bs) -> + io:format("Recover X ~p~n", [X]), + (type_to_module(Type)):start(Tx, X, Bs) + end, RecXBs), + dict:map(fun (X = #exchange{type = Type}, Bs) -> + io:format("Recover Bs ~p~n", [Bs]), + (type_to_module(Type)):add_bindings(Tx, X, Bs) + end, NoRecXBs) + end), ok. 
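%% Sketch (module name invented) of a custom exchange type written against
%% the revised behaviour: start/3 and add_bindings/3 replace the previous
%% create/2, recover/2 and add_binding/3 callbacks, mirroring the built-in
%% types updated in this commit.
-module(rabbit_exchange_type_noop).
-behaviour(rabbit_exchange_type).

-export([description/0, route/2]).
-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3,
         assert_args_equivalence/2]).

description() ->
    [{name, <<"noop">>}, {description, <<"routes to nothing">>}].

route(_X, _Delivery) -> [].

validate(_X)                     -> ok.
start(_Tx, _X, _Bs)              -> ok.
delete(_Tx, _X, _Bs)             -> ok.
add_bindings(_Tx, _X, _Bs)       -> ok.
remove_bindings(_Tx, _X, _Bs)    -> ok.
assert_args_equivalence(X, Args) ->
    rabbit_exchange:assert_args_equivalence(X, Args).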
callback(#exchange{type = XType}, Fun, Args) -> @@ -134,7 +150,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - ok = (type_to_module(Type)):create(Tx, Exchange), + ok = (type_to_module(Type)):start(Tx, Exchange, []), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 547583e9..ad08eb86 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -26,17 +26,14 @@ behaviour_info(callbacks) -> %% called BEFORE declaration, to check args etc; may exit with #amqp_error{} {validate, 1}, - %% called after declaration when previously absent - {create, 2}, + %% called after declaration and recovery + {start, 3}, - %% called when recovering - {recover, 2}, - - %% called after exchange deletion. + %% called after exchange (auto)deletion. {delete, 3}, - %% called after a binding has been added - {add_binding, 3}, + %% called after a binding has been added or bindings have been recovered + {add_bindings, 3}, %% called after bindings have been deleted. {remove_bindings, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 349c2f6e..1658c9f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -20,8 +20,8 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, - add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([validate/1, start/3, delete/3, + add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). -rabbit_boot_step({?MODULE, @@ -40,10 +40,9 @@ route(#exchange{name = Name}, rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index bc5293c8..83afdd71 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -39,10 +39,9 @@ route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index d3529b06..0fe8404f 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). 
--export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -113,10 +113,9 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..52f468ee 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -47,13 +47,14 @@ route(#exchange{name = X}, end || RKey <- Routes]). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_Exchange, Bs) -> +start(true, _X, Bs) -> rabbit_misc:execute_mnesia_transaction( fun () -> lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end). + end); +start(false, _X, _Bs) -> + ok. delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), @@ -62,9 +63,12 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _Exchange, Binding) -> - internal_add_binding(Binding); -add_binding(false, _Exchange, _Binding) -> +add_bindings(true, _X, Bs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> + lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) + end); +add_bindings(false, _X, _Bs) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index fb1c9a34..075258e5 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -629,7 +629,7 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]], - lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, + lists:foreach(fun (B) -> exchange_op_callback(X, add_bindings, [[B]]) end, Bindings), %% test some matches -- cgit v1.2.1 From 5c36ec391adbf2d949cfbce79efab20eb03b7116 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 13:08:50 +0100 Subject: Only recover bindings that are to exchanges or to queues that are on this node. --- src/rabbit_binding.erl | 29 +++++++++++++++++++++++------ src/rabbit_exchange.erl | 3 --- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 84ae789c..c9cf0a39 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -96,15 +96,32 @@ recover() -> rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case mnesia:read({rabbit_route, B}) of - [] -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, write), - [B | Acc]; - [_] -> Acc + case should_recover(B) of + true -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, write), + [B | Acc]; + false -> Acc end end, [], rabbit_durable_route). 
+should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}) -> + case mnesia:read({rabbit_route, B}) of + [] -> case Kind of + exchange -> true; + queue -> case mnesia:read({rabbit_durable_queue, Dest}) of + [Q] -> #amqqueue{pid = Pid} = Q, + Node = node(), + case node(Pid) of + Node -> true; + _ -> false + end; + _ -> false + end + end; + _ -> false + end. + exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index f6ab9d74..572a0b70 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -95,7 +95,6 @@ recover() -> {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), ok = recovery_callbacks(RecXBs, NoRecXBs). -%% TODO strip out bindings that are to queues not on this node filter_recovered_exchanges(Xs, Bs) -> RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), lists:foldl( @@ -112,11 +111,9 @@ recovery_callbacks(RecXBs, NoRecXBs) -> fun () -> ok end, fun (ok, Tx) -> dict:map(fun (X = #exchange{type = Type}, Bs) -> - io:format("Recover X ~p~n", [X]), (type_to_module(Type)):start(Tx, X, Bs) end, RecXBs), dict:map(fun (X = #exchange{type = Type}, Bs) -> - io:format("Recover Bs ~p~n", [Bs]), (type_to_module(Type)):add_bindings(Tx, X, Bs) end, NoRecXBs) end), -- cgit v1.2.1 From 6fe41f3e724cd65792916b24e50748d0bdc0e4be Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 13:25:18 +0100 Subject: Oops. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 075258e5..9b122a02 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -595,7 +595,7 @@ test_topic_matching() -> auto_delete = false, arguments = []}, %% create rabbit_exchange_type_topic:validate(X), - exchange_op_callback(X, create, []), + exchange_op_callback(X, start, [[]]), %% add some bindings Bindings = [#binding{source = XName, -- cgit v1.2.1 From ee0deb4dd37985bdfef24cda035121ba5e02f82b Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 31 Mar 2011 14:59:46 +0100 Subject: reverting previous changes; treating {shutdown, _} exit reasons the same as normal in supervisor2 --- src/supervisor2.erl | 150 ++++++++++++++++++++++++---------------------------- 1 file changed, 70 insertions(+), 80 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 2c0874ab..73316db9 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,9 +38,8 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% -%% 5) Added an exception to reporting: If a child has MaxR = 0 and it -%% terminates with reason {shutdown, _}, then supervisor2 behaves -%% as supervisor *except* it does not report anything to error_logger. +%% 5) normal, shutdown and {shutdown, _} exit reasons are all treated the same +%% (i.e. are regarded as normal exits) %% %% All modifications are (C) 2010-2011 VMware, Inc. %% @@ -116,10 +115,10 @@ behaviour_info(_Other) -> %%% --------------------------------------------------- start_link(Mod, Args) -> gen_server:start_link(?MODULE, {self, Mod, Args}, []). - + start_link(SupName, Mod, Args) -> gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []). - + %%% --------------------------------------------------- %%% Interface functions. 
%%% --------------------------------------------------- @@ -162,9 +161,9 @@ delayed_restart(Supervisor, RestartDetails) -> gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). %%% --------------------------------------------------- -%%% +%%% %%% Initialize the supervisor. -%%% +%%% %%% --------------------------------------------------- init({SupName, Mod, Args}) -> process_flag(trap_exit, true), @@ -183,7 +182,7 @@ init({SupName, Mod, Args}) -> Error -> {stop, {bad_return, {Mod, init, Error}}} end. - + init_children(State, StartSpec) -> SupName = State#state.name, case check_startspec(StartSpec) of @@ -213,7 +212,7 @@ init_dynamic(_State, StartSpec) -> %% Func: start_children/2 %% Args: Children = [#child] in start order %% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} -%% Purpose: Start all children. The new list contains #child's +%% Purpose: Start all children. The new list contains #child's %% with pids. %% Returns: {ok, NChildren} | {error, NChildren} %% NChildren = [#child] in termination order (reversed @@ -245,7 +244,7 @@ do_start_child(SupName, Child) -> NChild = Child#child{pid = Pid}, report_progress(NChild, SupName), {ok, Pid, Extra}; - ignore -> + ignore -> {ok, undefined}; {error, What} -> {error, What}; What -> {error, What} @@ -264,23 +263,23 @@ do_start_child_i(M, F, A) -> What -> {error, What} end. - + %%% --------------------------------------------------- -%%% +%%% %%% Callback functions. -%%% +%%% %%% --------------------------------------------------- handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) -> #child{mfa = {M, F, A}} = hd(State#state.children), Args = A ++ EArgs, case do_start_child_i(M, F, Args) of {ok, Pid} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid}, NState}; {ok, Pid, Extra} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid, Extra}, NState}; What -> @@ -373,7 +372,7 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> %%% Hopefully cause a function-clause as there is no API function %%% that utilizes cast. handle_cast(null, State) -> - error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", []), {noreply, State}. @@ -390,7 +389,7 @@ handle_info({'EXIT', Pid, Reason}, State) -> end; handle_info(Msg, State) -> - error_logger:error_msg("Supervisor received unexpected message: ~p~n", + error_logger:error_msg("Supervisor received unexpected message: ~p~n", [Msg]), {noreply, State}. %% @@ -440,13 +439,13 @@ check_flags({Strategy, MaxIntensity, Period}) -> check_flags(What) -> {bad_flags, What}. -update_childspec(State, StartSpec) when ?is_simple(State) -> - case check_startspec(StartSpec) of - {ok, [Child]} -> - {ok, State#state{children = [Child]}}; - Error -> - {error, Error} - end; +update_childspec(State, StartSpec) when ?is_simple(State) -> + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; update_childspec(State, StartSpec) -> case check_startspec(StartSpec) of @@ -467,7 +466,7 @@ update_childspec1([Child|OldC], Children, KeepOld) -> end; update_childspec1([], Children, KeepOld) -> % Return them in (keeped) reverse start order. - lists:reverse(Children ++ KeepOld). + lists:reverse(Children ++ KeepOld). 
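%% Sketch of the semantics restated in this commit's header comment: a
%% transient supervisor2 child that exits with {shutdown, Reason} is treated
%% like a normal exit -- it is removed, not restarted and not reported. The
%% child spec below is invented for illustration.
quiet_child_spec() ->
    {drainer, {drainer, start_link, []},
     transient, 5000, worker, [drainer]}.
%% A 'drainer' that calls exit({shutdown, drained}) therefore disappears from
%% its supervisor exactly as if it had exited with reason 'normal'.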
update_chsp(OldCh, Children) -> case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name -> @@ -481,7 +480,7 @@ update_chsp(OldCh, Children) -> NewC -> {ok, NewC} end. - + %%% --------------------------------------------------- %%% Start a new child. %%% --------------------------------------------------- @@ -493,12 +492,12 @@ handle_start_child(Child, State) -> {ok, Pid} -> Children = State#state.children, {{ok, Pid}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {ok, Pid, Extra} -> Children = State#state.children, {{ok, Pid, Extra}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {error, What} -> {{error, {What, Child}}, State} @@ -546,37 +545,28 @@ do_restart({RestartType, Delay}, Reason, Child, State) -> {ok, state_del_child(Child, NState)} end; do_restart(permanent, Reason, Child, State) -> - maybe_report_and_restart(Reason, Child, State); -do_restart(intrinsic, normal, Child, State) -> - {shutdown, state_del_child(Child, State)}; -do_restart(intrinsic, shutdown, Child = #child{child_type = supervisor}, - State) -> - {shutdown, state_del_child(Child, State)}; -do_restart(_, normal, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}; -do_restart(_, shutdown, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}; + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); +do_restart(Type, normal, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); +do_restart(Type, shutdown, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); +do_restart(Type, {shutdown, _}, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> - maybe_report_and_restart(Reason, Child, State); + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); do_restart(temporary, Reason, Child, State) -> - maybe_report(Reason, Child, State), + report_error(child_terminated, Reason, Child, State#state.name), NState = state_del_child(Child, State), {ok, NState}. -maybe_report_and_restart({shutdown, _}, Child, State = #state{intensity = 0}) -> - {terminate, NState} = add_restart(State), - {shutdown, state_del_child(Child, NState)}; -maybe_report_and_restart(Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State). - -maybe_report({shutdown, _}, _Child, #state{intensity = 0}) -> - ok; -maybe_report(Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name). +normal_or_shutdown_restart(intrinsic, Child, State) -> + {shutdown, state_del_child(Child, State)}; +normal_or_shutdown_restart(_, Child, State) -> + NState = state_del_child(Child, State), + {ok, NState}. restart(Child, State) -> case add_restart(State) of @@ -691,17 +681,17 @@ do_terminate(Child, _SupName) -> Child. %%----------------------------------------------------------------- -%% Shutdowns a child. We must check the EXIT value +%% Shutdowns a child. We must check the EXIT value %% of the child, because it might have died with another reason than -%% the wanted. In that case we want to report the error. We put a -%% monitor on the child an check for the 'DOWN' message instead of -%% checking for the 'EXIT' message, because if we check the 'EXIT' -%% message a "naughty" child, who does unlink(Sup), could hang the -%% supervisor. +%% the wanted. 
In that case we want to report the error. We put a +%% monitor on the child an check for the 'DOWN' message instead of +%% checking for the 'EXIT' message, because if we check the 'EXIT' +%% message a "naughty" child, who does unlink(Sup), could hang the +%% supervisor. %% Returns: ok | {error, OtherReason} (this should be reported) %%----------------------------------------------------------------- shutdown(Pid, brutal_kill) -> - + case monitor_child(Pid) of ok -> exit(Pid, kill), @@ -711,16 +701,16 @@ shutdown(Pid, brutal_kill) -> {'DOWN', _MRef, process, Pid, OtherReason} -> {error, OtherReason} end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end; shutdown(Pid, Time) -> - + case monitor_child(Pid) of ok -> exit(Pid, shutdown), %% Try to shutdown gracefully - receive + receive {'DOWN', _MRef, process, Pid, shutdown} -> ok; {'DOWN', _MRef, process, Pid, OtherReason} -> @@ -732,14 +722,14 @@ shutdown(Pid, Time) -> {error, OtherReason} end end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end. %% Help function to shutdown/2 switches from link to monitor approach monitor_child(Pid) -> - - %% Do the monitor operation first so that if the child dies + + %% Do the monitor operation first so that if the child dies %% before the monitoring is done causing a 'DOWN'-message with %% reason noproc, we will get the real reason in the 'EXIT'-message %% unless a naughty child has already done unlink... @@ -749,22 +739,22 @@ monitor_child(Pid) -> receive %% If the child dies before the unlik we must empty %% the mail-box of the 'EXIT'-message and the 'DOWN'-message. - {'EXIT', Pid, Reason} -> - receive + {'EXIT', Pid, Reason} -> + receive {'DOWN', _, process, Pid, _} -> {error, Reason} end - after 0 -> + after 0 -> %% If a naughty child did unlink and the child dies before - %% monitor the result will be that shutdown/2 receives a + %% monitor the result will be that shutdown/2 receives a %% 'DOWN'-message with reason noproc. %% If the child should die after the unlink there %% will be a 'DOWN'-message with a correct reason - %% that will be handled in shutdown/2. - ok + %% that will be handled in shutdown/2. + ok end. - - + + %%----------------------------------------------------------------- %% Child/State manipulating functions. %%----------------------------------------------------------------- @@ -818,7 +808,7 @@ remove_child(Child, State) -> %% Args: SupName = {local, atom()} | {global, atom()} | self %% Type = {Strategy, MaxIntensity, Period} %% Strategy = one_for_one | one_for_all | simple_one_for_one | -%% rest_for_one +%% rest_for_one %% MaxIntensity = integer() %% Period = integer() %% Mod :== atom() @@ -912,10 +902,10 @@ validChildType(supervisor) -> true; validChildType(worker) -> true; validChildType(What) -> throw({invalid_child_type, What}). -validName(_Name) -> true. +validName(_Name) -> true. -validFunc({M, F, A}) when is_atom(M), - is_atom(F), +validFunc({M, F, A}) when is_atom(M), + is_atom(F), is_list(A) -> true; validFunc(Func) -> throw({invalid_mfa, Func}). @@ -932,7 +922,7 @@ validDelay(Delay) when is_number(Delay), Delay >= 0 -> true; validDelay(What) -> throw({invalid_delay, What}). -validShutdown(Shutdown, _) +validShutdown(Shutdown, _) when is_integer(Shutdown), Shutdown > 0 -> true; validShutdown(infinity, supervisor) -> true; validShutdown(brutal_kill, _) -> true; @@ -958,7 +948,7 @@ validMods(Mods) -> throw({invalid_modules, Mods}). 
%%% Returns: {ok, State'} | {terminate, State'} %%% ------------------------------------------------------ -add_restart(State) -> +add_restart(State) -> I = State#state.intensity, P = State#state.period, R = State#state.restarts, -- cgit v1.2.1 From 54753b4b0b5a803ebe4777bc7e771b8e43d6fa1f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:04:41 +0100 Subject: Unify recovery into one boot step, based binding recovery on the queues that have been recovered. --- src/rabbit.erl | 47 +++++++++++++++++++++++++++++++++++++---------- src/rabbit_amqqueue.erl | 7 +++---- src/rabbit_binding.erl | 22 ++++++++-------------- src/rabbit_exchange.erl | 41 +++++++---------------------------------- 4 files changed, 55 insertions(+), 62 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 807e9e7d..86c53ff6 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -27,7 +27,7 @@ %%--------------------------------------------------------------------------- %% Boot steps. --export([maybe_insert_default_data/0, boot_delegate/0]). +-export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). -rabbit_boot_step({codec_correctness_check, [{description, "codec correctness check"}, @@ -123,15 +123,9 @@ {requires, core_initialized}, {enables, routing_ready}]}). --rabbit_boot_step({exchange_recovery, - [{description, "exchange recovery"}, - {mfa, {rabbit_exchange, recover, []}}, - {requires, empty_db_check}, - {enables, routing_ready}]}). - --rabbit_boot_step({queue_sup_queue_recovery, - [{description, "queue supervisor and queue recovery"}, - {mfa, {rabbit_amqqueue, start, []}}, +-rabbit_boot_step({recovery, + [{description, "exchange / queue recovery"}, + {mfa, {rabbit, recover, []}}, {requires, empty_db_check}, {enables, routing_ready}]}). @@ -186,6 +180,7 @@ -spec(maybe_insert_default_data/0 :: () -> 'ok'). -spec(boot_delegate/0 :: () -> 'ok'). +-spec(recover/0 :: () -> 'ok'). -endif. @@ -464,6 +459,38 @@ boot_delegate() -> {ok, Count} = application:get_env(rabbit, delegate_count), rabbit_sup:start_child(delegate_sup, [Count]). +recover() -> + Xs = rabbit_exchange:recover(), + Qs = rabbit_amqqueue:start(), + Bs = rabbit_binding:recover(Qs), + {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), + ok = recovery_callbacks(RecXBs, NoRecSrcBs). + +filter_recovered_exchanges(Xs, Bs) -> + RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + lists:foldl( + fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> + case dict:find(Src, RecXs) of + {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; + error -> {RecXBs, dict:append(Src, B, NoRecXBs)} + end + end, {dict:new(), dict:new()}, Bs). + +recovery_callbacks(RecXBs, NoRecXBs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (X, Bs) -> + rabbit_exchange:callback(X, start, [Tx, X, Bs]) + end, RecXBs), + dict:map(fun (Src, Bs) -> + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bs]) + end, NoRecXBs) + end), + ok. + maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of true -> insert_default_data(); diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index c7391965..2618c1f5 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -57,7 +57,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> 'ok'). +-spec(start/0 :: () -> [rabbit_types:amqqueue()]). -spec(stop/0 :: () -> 'ok'). 
-spec(declare/5 :: (name(), boolean(), boolean(), @@ -166,8 +166,7 @@ start() -> {rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []}, transient, infinity, supervisor, [rabbit_amqqueue_sup]}), - _RealDurableQueues = recover_durable_queues(DurableQueues), - ok. + recover_durable_queues(DurableQueues). stop() -> ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup), @@ -188,7 +187,7 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], [Q || Q <- Qs, - gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == Q]. + gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c9cf0a39..e656cfc7 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/0, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/1, exists/1, add/1, remove/1, add/2, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -50,7 +50,7 @@ -opaque(deletions() :: dict()). --spec(recover/0 :: () -> [rabbit_types:binding()]). +-spec(recover/1 :: ([rabbit_types:amqqueue()]) -> [rabbit_types:binding()]). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -93,10 +93,11 @@ destination_name, destination_kind, routing_key, arguments]). -recover() -> +recover(Qs) -> + QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B) of + case should_recover(B, QNames) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), @@ -105,19 +106,12 @@ recover() -> end end, [], rabbit_durable_route). -should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}) -> +should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, + QNames) -> case mnesia:read({rabbit_route, B}) of [] -> case Kind of exchange -> true; - queue -> case mnesia:read({rabbit_durable_queue, Dest}) of - [Q] -> #amqqueue{pid = Pid} = Q, - Node = node(), - case node(Pid) of - Node -> true; - _ -> false - end; - _ -> false - end + queue -> sets:is_element(Dest, QNames) end; _ -> false end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 572a0b70..fa837d0c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,41 +83,14 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; - [_] -> Acc - end - end, [], rabbit_durable_exchange), - Bs = rabbit_binding:recover(), - {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), - ok = recovery_callbacks(RecXBs, NoRecXBs). 
- -filter_recovered_exchanges(Xs, Bs) -> - RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), - lists:foldl( - fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case dict:find(Src, RecXs) of - {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; - error -> {ok, X} = lookup(Src), - {RecXBs, dict:append(X, B, NoRecXBs)} + rabbit_misc:table_fold( + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc end - end, {dict:new(), dict:new()}, Bs). - -recovery_callbacks(RecXBs, NoRecXBs) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> - dict:map(fun (X = #exchange{type = Type}, Bs) -> - (type_to_module(Type)):start(Tx, X, Bs) - end, RecXBs), - dict:map(fun (X = #exchange{type = Type}, Bs) -> - (type_to_module(Type)):add_bindings(Tx, X, Bs) - end, NoRecXBs) - end), - ok. + end, [], rabbit_durable_exchange). callback(#exchange{type = XType}, Fun, Args) -> apply(type_to_module(XType), Fun, Args). -- cgit v1.2.1 From 338aad71454799c932b875b9ce7e57bcedf44793 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:18:42 +0100 Subject: Fix tests. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 9b122a02..89d0d162 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - ok = rabbit_amqqueue:start(), + rabbit_amqqueue:start(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From 3833c25b23c209f3c5a77d14ec459b15c82b7f55 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:25:43 +0100 Subject: Recover e2e properly. --- src/rabbit.erl | 2 +- src/rabbit_binding.erl | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 86c53ff6..6b6731a3 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -462,7 +462,7 @@ boot_delegate() -> recover() -> Xs = rabbit_exchange:recover(), Qs = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(Qs), + Bs = rabbit_binding:recover(Xs, Qs), {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), ok = recovery_callbacks(RecXBs, NoRecSrcBs). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index e656cfc7..fff9016c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/1, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/2, exists/1, add/1, remove/1, add/2, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -50,7 +50,8 @@ -opaque(deletions() :: dict()). --spec(recover/1 :: ([rabbit_types:amqqueue()]) -> [rabbit_types:binding()]). +-spec(recover/2 :: ([rabbit_types:exchange()], [rabbit_types:amqqueue()]) -> + [rabbit_types:binding()]). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -93,11 +94,12 @@ destination_name, destination_kind, routing_key, arguments]). 
-recover(Qs) -> +recover(Xs, Qs) -> + XNames = sets:from_list([Name || #exchange{name = Name} <- Xs]), QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, QNames) of + case should_recover(B, XNames, QNames) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), @@ -107,12 +109,12 @@ recover(Qs) -> end, [], rabbit_durable_route). should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, - QNames) -> + XNames, QNames) -> case mnesia:read({rabbit_route, B}) of - [] -> case Kind of - exchange -> true; - queue -> sets:is_element(Dest, QNames) - end; + [] -> sets:is_element(Dest, case Kind of + exchange -> XNames; + queue -> QNames + end); _ -> false end. -- cgit v1.2.1 From 8a749d93ff409f665ff610ca62482705f672db13 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 31 Mar 2011 17:38:29 +0100 Subject: Slimmer gatherer termination --- src/rabbit_misc.erl | 10 +--------- src/rabbit_msg_store.erl | 2 +- src/rabbit_queue_index.erl | 6 +++--- src/test_sup.erl | 2 +- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 2e9563cf..1daeeb2a 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -48,8 +48,7 @@ -export([sort_field_table/1]). -export([pid_to_string/1, string_to_pid/1]). -export([version_compare/2, version_compare/3]). --export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3, - unlink_and_capture_exit/1]). +-export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). -export([now_ms/0]). @@ -178,7 +177,6 @@ -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})). -spec(dict_cons/3 :: (any(), any(), dict()) -> dict()). -spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()). --spec(unlink_and_capture_exit/1 :: (pid()) -> 'ok'). -spec(get_options/2 :: ([optdef()], [string()]) -> {[string()], [{string(), any()}]}). -spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]). @@ -749,12 +747,6 @@ dict_cons(Key, Value, Dict) -> orddict_cons(Key, Value, Dict) -> orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict). -unlink_and_capture_exit(Pid) -> - unlink(Pid), - receive {'EXIT', Pid, _} -> ok - after 0 -> ok - end. - %% Separate flags and options from arguments. 
%% get_options([{flag, "-q"}, {option, "-p", "/"}], %% ["set_permissions","-p","/","guest", diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 34c793ec..65688142 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1511,8 +1511,8 @@ build_index(Gatherer, Left, [], sum_file_size = SumFileSize }) -> case gatherer:out(Gatherer) of empty -> + unlink(Gatherer), ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer), ok = index_delete_by_file(undefined, State), Offset = case ets:lookup(FileSummaryEts, Left) of [] -> 0; diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 367953b8..aaf3df78 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -514,8 +514,8 @@ queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) -> queue_index_walker({next, Gatherer}) when is_pid(Gatherer) -> case gatherer:out(Gatherer) of empty -> + unlink(Gatherer), ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer), finished; {value, {MsgId, Count}} -> {MsgId, Count, {next, Gatherer}} @@ -1036,8 +1036,8 @@ foreach_queue_index(Funs) -> end) end || QueueDirName <- QueueDirNames], empty = gatherer:out(Gatherer), - ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer). + unlink(Gatherer), + ok = gatherer:stop(Gatherer). transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) -> ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun), diff --git a/src/test_sup.erl b/src/test_sup.erl index b4df1fd0..5fc0eac0 100644 --- a/src/test_sup.erl +++ b/src/test_sup.erl @@ -46,7 +46,7 @@ with_sup(RestartStrategy, Fun) -> {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), Res = Fun(SupPid), exit(SupPid, shutdown), - rabbit_misc:unlink_and_capture_exit(SupPid), + unlink(SupPid), Res. 
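%% Stand-alone sketch of the pattern the gatherer call sites above now use
%% instead of unlink_and_capture_exit: unlink while the helper is still
%% alive, so no {'EXIT', ...} message can be queued and nothing needs to be
%% flushed, even when trapping exits. The helper process here is invented.
unlink_before_stop_demo() ->
    process_flag(trap_exit, true),
    Helper = spawn_link(fun () -> receive stop -> ok end end),
    unlink(Helper),              %% drop the link before the helper can exit
    Helper ! stop,
    receive {'EXIT', Helper, _} -> exit(unexpected_exit_message)
    after 100 -> ok
    end.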
init([RestartStrategy]) -> -- cgit v1.2.1 From ff78d574ece961bcafc3efe1fbd235893d8ea28a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 1 Apr 2011 04:06:07 +0100 Subject: fix some R12B-5isms --- src/gm.erl | 8 ++++---- src/rabbit_exchange_type_topic.erl | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 1edcde11..aa5ba146 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1011,7 +1011,7 @@ prune_or_create_group(Self, GroupName) -> fun () -> GroupNew = #gm_group { name = GroupName, members = [Self], version = 0 }, - case mnesia:read(?GROUP_TABLE, GroupName) of + case mnesia:read(?GROUP_TABLE, GroupName, read) of [] -> mnesia:write(GroupNew), GroupNew; @@ -1029,7 +1029,7 @@ record_dead_member_in_group(Member, GroupName) -> {atomic, Group} = mnesia:sync_transaction( fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), case lists:splitwith( fun (Member1) -> Member1 =/= Member end, Members) of {_Members1, []} -> %% not found - already recorded dead @@ -1049,7 +1049,7 @@ record_new_member_in_group(GroupName, Left, NewMember, Fun) -> mnesia:sync_transaction( fun () -> [#gm_group { members = Members, version = Ver } = Group1] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), {Prefix, [Left | Suffix]} = lists:splitwith(fun (M) -> M =/= Left end, Members), Members1 = Prefix ++ [Left, NewMember | Suffix], @@ -1068,7 +1068,7 @@ erase_members_in_group(Members, GroupName) -> fun () -> [Group1 = #gm_group { members = [_|_] = Members1, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), case Members1 -- DeadMembers of Members1 -> Group1; Members2 -> Group2 = diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..a61e380b 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -191,7 +191,7 @@ trie_child(X, Node, Word) -> case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, node_id = Node, - word = Word}) of + word = Word}, read) of [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; [] -> error end. -- cgit v1.2.1 From 3da0764b6482d711bff0faca201fc5851543ad81 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 11:03:52 +0100 Subject: Use names, not exchanges / queues. --- src/rabbit.erl | 34 +++++++++++++++++----------------- src/rabbit_amqqueue.erl | 2 +- src/rabbit_binding.erl | 8 ++++---- src/rabbit_exchange.erl | 2 +- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 6b6731a3..fe392c5f 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -460,34 +460,34 @@ boot_delegate() -> rabbit_sup:start_child(delegate_sup, [Count]). recover() -> - Xs = rabbit_exchange:recover(), - Qs = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(Xs, Qs), - {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), - ok = recovery_callbacks(RecXBs, NoRecSrcBs). + XNames = rabbit_exchange:recover(), + QNames = rabbit_amqqueue:start(), + Bs = rabbit_binding:recover(XNames, QNames), + {RecXBs, NoRecXBs} = filter_recovered_exchanges(XNames, Bs), + ok = recovery_callbacks(RecXBs, NoRecXBs). 
filter_recovered_exchanges(Xs, Bs) -> - RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + RecXs = sets:from_list(Xs), lists:foldl( fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case dict:find(Src, RecXs) of - {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; - error -> {RecXBs, dict:append(Src, B, NoRecXBs)} + case sets:is_element(Src, RecXs) of + true -> {dict:append(Src, B, RecXBs), NoRecXBs}; + false -> {RecXBs, dict:append(Src, B, NoRecXBs)} end end, {dict:new(), dict:new()}, Bs). recovery_callbacks(RecXBs, NoRecXBs) -> + CB = fun (Tx, F, XBs) -> + dict:map(fun (XName, Bs) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, F, [Tx, X, Bs]) + end, XBs) + end, rabbit_misc:execute_mnesia_transaction( fun () -> ok end, fun (ok, Tx) -> - dict:map(fun (X, Bs) -> - rabbit_exchange:callback(X, start, [Tx, X, Bs]) - end, RecXBs), - dict:map(fun (Src, Bs) -> - {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bs]) - end, NoRecXBs) + CB(Tx, start, RecXBs), + CB(Tx, add_bindings, NoRecXBs) end), ok. diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 2618c1f5..6267b823 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -186,7 +186,7 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], - [Q || Q <- Qs, + [Q#amqqueue.name || Q <- Qs, gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index fff9016c..47793920 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -94,12 +94,12 @@ destination_name, destination_kind, routing_key, arguments]). -recover(Xs, Qs) -> - XNames = sets:from_list([Name || #exchange{name = Name} <- Xs]), - QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), +recover(XsL, QsL) -> + Xs = sets:from_list(XsL), + Qs = sets:from_list(QsL), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, XNames, QNames) of + case should_recover(B, Xs, Qs) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index fa837d0c..e05a8812 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,7 +87,7 @@ recover() -> fun (X = #exchange{name = XName}, Acc) -> case mnesia:read({rabbit_exchange, XName}) of [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; + [XName | Acc]; [_] -> Acc end end, [], rabbit_durable_exchange). -- cgit v1.2.1 From e12c48c0a75969d873fd761d8f8672d5bf32517d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 12:22:42 +0100 Subject: Revert to trailing whitespace et al to reduce diff to Erlang's supervisor --- src/supervisor2.erl | 104 ++++++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 73316db9..19a95328 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -115,10 +115,10 @@ behaviour_info(_Other) -> %%% --------------------------------------------------- start_link(Mod, Args) -> gen_server:start_link(?MODULE, {self, Mod, Args}, []). - + start_link(SupName, Mod, Args) -> gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []). 
- + %%% --------------------------------------------------- %%% Interface functions. %%% --------------------------------------------------- @@ -161,9 +161,9 @@ delayed_restart(Supervisor, RestartDetails) -> gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). %%% --------------------------------------------------- -%%% +%%% %%% Initialize the supervisor. -%%% +%%% %%% --------------------------------------------------- init({SupName, Mod, Args}) -> process_flag(trap_exit, true), @@ -182,7 +182,7 @@ init({SupName, Mod, Args}) -> Error -> {stop, {bad_return, {Mod, init, Error}}} end. - + init_children(State, StartSpec) -> SupName = State#state.name, case check_startspec(StartSpec) of @@ -212,7 +212,7 @@ init_dynamic(_State, StartSpec) -> %% Func: start_children/2 %% Args: Children = [#child] in start order %% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} -%% Purpose: Start all children. The new list contains #child's +%% Purpose: Start all children. The new list contains #child's %% with pids. %% Returns: {ok, NChildren} | {error, NChildren} %% NChildren = [#child] in termination order (reversed @@ -244,7 +244,7 @@ do_start_child(SupName, Child) -> NChild = Child#child{pid = Pid}, report_progress(NChild, SupName), {ok, Pid, Extra}; - ignore -> + ignore -> {ok, undefined}; {error, What} -> {error, What}; What -> {error, What} @@ -263,23 +263,23 @@ do_start_child_i(M, F, A) -> What -> {error, What} end. - + %%% --------------------------------------------------- -%%% +%%% %%% Callback functions. -%%% +%%% %%% --------------------------------------------------- handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) -> #child{mfa = {M, F, A}} = hd(State#state.children), Args = A ++ EArgs, case do_start_child_i(M, F, Args) of {ok, Pid} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid}, NState}; {ok, Pid, Extra} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid, Extra}, NState}; What -> @@ -372,7 +372,7 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> %%% Hopefully cause a function-clause as there is no API function %%% that utilizes cast. handle_cast(null, State) -> - error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", []), {noreply, State}. @@ -389,7 +389,7 @@ handle_info({'EXIT', Pid, Reason}, State) -> end; handle_info(Msg, State) -> - error_logger:error_msg("Supervisor received unexpected message: ~p~n", + error_logger:error_msg("Supervisor received unexpected message: ~p~n", [Msg]), {noreply, State}. %% @@ -439,13 +439,13 @@ check_flags({Strategy, MaxIntensity, Period}) -> check_flags(What) -> {bad_flags, What}. -update_childspec(State, StartSpec) when ?is_simple(State) -> - case check_startspec(StartSpec) of - {ok, [Child]} -> - {ok, State#state{children = [Child]}}; - Error -> - {error, Error} - end; +update_childspec(State, StartSpec) when ?is_simple(State) -> + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; update_childspec(State, StartSpec) -> case check_startspec(StartSpec) of @@ -466,7 +466,7 @@ update_childspec1([Child|OldC], Children, KeepOld) -> end; update_childspec1([], Children, KeepOld) -> % Return them in (keeped) reverse start order. 
- lists:reverse(Children ++ KeepOld). + lists:reverse(Children ++ KeepOld). update_chsp(OldCh, Children) -> case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name -> @@ -480,7 +480,7 @@ update_chsp(OldCh, Children) -> NewC -> {ok, NewC} end. - + %%% --------------------------------------------------- %%% Start a new child. %%% --------------------------------------------------- @@ -492,12 +492,12 @@ handle_start_child(Child, State) -> {ok, Pid} -> Children = State#state.children, {{ok, Pid}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {ok, Pid, Extra} -> Children = State#state.children, {{ok, Pid, Extra}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {error, What} -> {{error, {What, Child}}, State} @@ -681,17 +681,17 @@ do_terminate(Child, _SupName) -> Child. %%----------------------------------------------------------------- -%% Shutdowns a child. We must check the EXIT value +%% Shutdowns a child. We must check the EXIT value %% of the child, because it might have died with another reason than -%% the wanted. In that case we want to report the error. We put a -%% monitor on the child an check for the 'DOWN' message instead of -%% checking for the 'EXIT' message, because if we check the 'EXIT' -%% message a "naughty" child, who does unlink(Sup), could hang the -%% supervisor. +%% the wanted. In that case we want to report the error. We put a +%% monitor on the child an check for the 'DOWN' message instead of +%% checking for the 'EXIT' message, because if we check the 'EXIT' +%% message a "naughty" child, who does unlink(Sup), could hang the +%% supervisor. %% Returns: ok | {error, OtherReason} (this should be reported) %%----------------------------------------------------------------- shutdown(Pid, brutal_kill) -> - + case monitor_child(Pid) of ok -> exit(Pid, kill), @@ -701,16 +701,16 @@ shutdown(Pid, brutal_kill) -> {'DOWN', _MRef, process, Pid, OtherReason} -> {error, OtherReason} end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end; shutdown(Pid, Time) -> - + case monitor_child(Pid) of ok -> exit(Pid, shutdown), %% Try to shutdown gracefully - receive + receive {'DOWN', _MRef, process, Pid, shutdown} -> ok; {'DOWN', _MRef, process, Pid, OtherReason} -> @@ -722,14 +722,14 @@ shutdown(Pid, Time) -> {error, OtherReason} end end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end. %% Help function to shutdown/2 switches from link to monitor approach monitor_child(Pid) -> - - %% Do the monitor operation first so that if the child dies + + %% Do the monitor operation first so that if the child dies %% before the monitoring is done causing a 'DOWN'-message with %% reason noproc, we will get the real reason in the 'EXIT'-message %% unless a naughty child has already done unlink... @@ -739,22 +739,22 @@ monitor_child(Pid) -> receive %% If the child dies before the unlik we must empty %% the mail-box of the 'EXIT'-message and the 'DOWN'-message. - {'EXIT', Pid, Reason} -> - receive + {'EXIT', Pid, Reason} -> + receive {'DOWN', _, process, Pid, _} -> {error, Reason} end - after 0 -> + after 0 -> %% If a naughty child did unlink and the child dies before - %% monitor the result will be that shutdown/2 receives a + %% monitor the result will be that shutdown/2 receives a %% 'DOWN'-message with reason noproc. %% If the child should die after the unlink there %% will be a 'DOWN'-message with a correct reason - %% that will be handled in shutdown/2. 
- ok + %% that will be handled in shutdown/2. + ok end. - - + + %%----------------------------------------------------------------- %% Child/State manipulating functions. %%----------------------------------------------------------------- @@ -808,7 +808,7 @@ remove_child(Child, State) -> %% Args: SupName = {local, atom()} | {global, atom()} | self %% Type = {Strategy, MaxIntensity, Period} %% Strategy = one_for_one | one_for_all | simple_one_for_one | -%% rest_for_one +%% rest_for_one %% MaxIntensity = integer() %% Period = integer() %% Mod :== atom() @@ -902,10 +902,10 @@ validChildType(supervisor) -> true; validChildType(worker) -> true; validChildType(What) -> throw({invalid_child_type, What}). -validName(_Name) -> true. +validName(_Name) -> true. -validFunc({M, F, A}) when is_atom(M), - is_atom(F), +validFunc({M, F, A}) when is_atom(M), + is_atom(F), is_list(A) -> true; validFunc(Func) -> throw({invalid_mfa, Func}). @@ -922,7 +922,7 @@ validDelay(Delay) when is_number(Delay), Delay >= 0 -> true; validDelay(What) -> throw({invalid_delay, What}). -validShutdown(Shutdown, _) +validShutdown(Shutdown, _) when is_integer(Shutdown), Shutdown > 0 -> true; validShutdown(infinity, supervisor) -> true; validShutdown(brutal_kill, _) -> true; @@ -948,7 +948,7 @@ validMods(Mods) -> throw({invalid_modules, Mods}). %%% Returns: {ok, State'} | {terminate, State'} %%% ------------------------------------------------------ -add_restart(State) -> +add_restart(State) -> I = State#state.intensity, P = State#state.period, R = State#state.restarts, -- cgit v1.2.1 From 4c5f452226bad9d82ab3ad387d18a89262964307 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 1 Apr 2011 13:09:50 +0100 Subject: Unlink before exit --- src/test_sup.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test_sup.erl b/src/test_sup.erl index 5fc0eac0..150235da 100644 --- a/src/test_sup.erl +++ b/src/test_sup.erl @@ -45,8 +45,8 @@ test_supervisor_delayed_restart(SupPid) -> with_sup(RestartStrategy, Fun) -> {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), Res = Fun(SupPid), - exit(SupPid, shutdown), unlink(SupPid), + exit(SupPid, shutdown), Res. init([RestartStrategy]) -> -- cgit v1.2.1 From 493f98d8b6fe5d85223e754b1c05b73903490857 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 13:19:31 +0100 Subject: 'shutdown' is only treated specially if the child is a supervisor (previous behaviour was actually confused on this front). Some minor renaming, reordering, and inlining --- src/supervisor2.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 19a95328..ec1ee9cd 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,7 +38,7 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% -%% 5) normal, shutdown and {shutdown, _} exit reasons are all treated the same +%% 5) normal, and {shutdown, _} exit reasons are all treated the same %% (i.e. are regarded as normal exits) %% %% All modifications are (C) 2010-2011 VMware, Inc. 
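%% Sketch (not part of the patch): how exit reasons map onto restart
%% decisions after this change, as read from the do_restart/4 clauses below:
%%
%%   permanent             - always restarted, whatever the exit reason
%%   transient / intrinsic - restarted unless the reason is normal,
%%                           {shutdown, Term}, or, only when the child is
%%                           itself a supervisor, plain shutdown; in those
%%                           non-restart cases an intrinsic child also shuts
%%                           its supervisor down
%%   temporary             - never restarted
%%
%% A child spec using the supervisor2-specific 'intrinsic' restart type might
%% look like this (my_worker is made up for illustration):
%%
%%   {my_worker, {my_worker, start_link, []},
%%    intrinsic, 5000, worker, [my_worker]}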
@@ -548,11 +548,11 @@ do_restart(permanent, Reason, Child, State) -> report_error(child_terminated, Reason, Child, State#state.name), restart(Child, State); do_restart(Type, normal, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); -do_restart(Type, shutdown, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); + del_child_and_maybe_shutdown(Type, Child, State); do_restart(Type, {shutdown, _}, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); + del_child_and_maybe_shutdown(Type, Child, State); +do_restart(Type, shutdown, Child = #child{child_type = supervisor}, State) -> + del_child_and_maybe_shutdown(Type, Child, State); do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> report_error(child_terminated, Reason, Child, State#state.name), @@ -562,11 +562,10 @@ do_restart(temporary, Reason, Child, State) -> NState = state_del_child(Child, State), {ok, NState}. -normal_or_shutdown_restart(intrinsic, Child, State) -> +del_child_and_maybe_shutdown(intrinsic, Child, State) -> {shutdown, state_del_child(Child, State)}; -normal_or_shutdown_restart(_, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}. +del_child_and_maybe_shutdown(_, Child, State) -> + {ok, state_del_child(Child, State)}. restart(Child, State) -> case add_restart(State) of -- cgit v1.2.1 From cde255cac929da94d8722ca901b0b65876fe72f3 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 1 Apr 2011 14:27:29 +0100 Subject: use mnesia:read/1 instead of read/3 --- src/gm.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index aa5ba146..8b7dc70c 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1011,7 +1011,7 @@ prune_or_create_group(Self, GroupName) -> fun () -> GroupNew = #gm_group { name = GroupName, members = [Self], version = 0 }, - case mnesia:read(?GROUP_TABLE, GroupName, read) of + case mnesia:read({?GROUP_TABLE, GroupName}) of [] -> mnesia:write(GroupNew), GroupNew; @@ -1029,7 +1029,7 @@ record_dead_member_in_group(Member, GroupName) -> {atomic, Group} = mnesia:sync_transaction( fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), case lists:splitwith( fun (Member1) -> Member1 =/= Member end, Members) of {_Members1, []} -> %% not found - already recorded dead @@ -1049,7 +1049,7 @@ record_new_member_in_group(GroupName, Left, NewMember, Fun) -> mnesia:sync_transaction( fun () -> [#gm_group { members = Members, version = Ver } = Group1] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), {Prefix, [Left | Suffix]} = lists:splitwith(fun (M) -> M =/= Left end, Members), Members1 = Prefix ++ [Left, NewMember | Suffix], @@ -1068,7 +1068,7 @@ erase_members_in_group(Members, GroupName) -> fun () -> [Group1 = #gm_group { members = [_|_] = Members1, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), case Members1 -- DeadMembers of Members1 -> Group1; Members2 -> Group2 = -- cgit v1.2.1 From c4098939f59b526754247b708f87e1760cea1c4d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 14:08:39 +0100 Subject: Another read/1. 
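%% Note (illustrative, not part of either patch): the three mnesia read forms
%% juggled in this run of patches differ only in portability and verbosity.
%% mnesia:read({Tab, Key}) is shorthand for mnesia:read(Tab, Key, read), i.e.
%% a read under a read lock, while the two-argument mnesia:read(Tab, Key) is
%% the newer form that the earlier "R12B-5isms" patch had to avoid.  So,
%% inside a transaction (EdgeKey standing for the #trie_edge{} key below):
%%
%%   mnesia:read({rabbit_topic_trie_edge, EdgeKey})
%%     =:= mnesia:read(rabbit_topic_trie_edge, EdgeKey, read)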
--- src/rabbit_exchange_type_topic.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index a61e380b..c192f8cf 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -188,10 +188,10 @@ follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) -> end. trie_child(X, Node, Word) -> - case mnesia:read(rabbit_topic_trie_edge, - #trie_edge{exchange_name = X, - node_id = Node, - word = Word}, read) of + case mnesia:read({rabbit_topic_trie_edge, + #trie_edge{exchange_name = X, + node_id = Node, + word = Word}}) of [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; [] -> error end. -- cgit v1.2.1 From c9990f38f58ed8101d83e6e5c527275761aa7a1a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 16:48:09 +0100 Subject: QAish updates. --- include/rabbit_exchange_type_spec.hrl | 3 +- src/rabbit.erl | 33 ++-------------------- src/rabbit_amqqueue.erl | 11 ++++---- src/rabbit_binding.erl | 53 +++++++++++++++++++++-------------- src/rabbit_exchange.erl | 26 ++++++++++------- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_direct.erl | 4 +-- src/rabbit_exchange_type_fanout.erl | 4 +-- src/rabbit_exchange_type_headers.erl | 4 +-- src/rabbit_exchange_type_topic.erl | 9 ++---- 10 files changed, 67 insertions(+), 82 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 8163b6f2..fd3ddf7e 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -20,8 +20,7 @@ -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(start/3 :: (boolean(), rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), diff --git a/src/rabbit.erl b/src/rabbit.erl index fe392c5f..2840a5b7 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -124,7 +124,7 @@ {enables, routing_ready}]}). -rabbit_boot_step({recovery, - [{description, "exchange / queue recovery"}, + [{description, "exchange, queue and binding recovery"}, {mfa, {rabbit, recover, []}}, {requires, empty_db_check}, {enables, routing_ready}]}). @@ -461,35 +461,8 @@ boot_delegate() -> recover() -> XNames = rabbit_exchange:recover(), - QNames = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(XNames, QNames), - {RecXBs, NoRecXBs} = filter_recovered_exchanges(XNames, Bs), - ok = recovery_callbacks(RecXBs, NoRecXBs). - -filter_recovered_exchanges(Xs, Bs) -> - RecXs = sets:from_list(Xs), - lists:foldl( - fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case sets:is_element(Src, RecXs) of - true -> {dict:append(Src, B, RecXBs), NoRecXBs}; - false -> {RecXBs, dict:append(Src, B, NoRecXBs)} - end - end, {dict:new(), dict:new()}, Bs). - -recovery_callbacks(RecXBs, NoRecXBs) -> - CB = fun (Tx, F, XBs) -> - dict:map(fun (XName, Bs) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, F, [Tx, X, Bs]) - end, XBs) - end, - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> - CB(Tx, start, RecXBs), - CB(Tx, add_bindings, NoRecXBs) - end), - ok. + QNames = rabbit_amqqueue:recover(), + rabbit_binding:recover(XNames, QNames). 
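%% Sketch (not part of the patch): the shape of an exchange type callback
%% module after the start/3 -> create/2 change, modelled on the built-in
%% types further down in this series.  The module name and description are
%% made up for illustration; route/2 returning [] simply routes nowhere.

-module(rabbit_exchange_type_example).
-behaviour(rabbit_exchange_type).

-export([description/0, route/2]).
-export([validate/1, create/2, delete/3, add_bindings/3,
         remove_bindings/3, assert_args_equivalence/2]).

description() ->
    [{name, <<"x-example">>}, {description, <<"illustration only">>}].

route(_X, _Delivery)          -> [].
validate(_X)                  -> ok.
create(_Tx, _X)               -> ok.
delete(_Tx, _X, _Bs)          -> ok.
add_bindings(_Tx, _X, _Bs)    -> ok.
remove_bindings(_Tx, _X, _Bs) -> ok.
assert_args_equivalence(X, Args) ->
    rabbit_exchange:assert_args_equivalence(X, Args).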
maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 6267b823..34ed88bc 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,7 +16,8 @@ -module(rabbit_amqqueue). --export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). +-export([recover/0, stop/0, declare/5, delete_immediately/1, delete/3, + purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, @@ -57,7 +58,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> [rabbit_types:amqqueue()]). +-spec(recover/0 :: () -> [rabbit_types:amqqueue()]). -spec(stop/0 :: () -> 'ok'). -spec(declare/5 :: (name(), boolean(), boolean(), @@ -157,7 +158,7 @@ %%---------------------------------------------------------------------------- -start() -> +recover() -> DurableQueues = find_durable_queues(), {ok, BQ} = application:get_env(rabbit, backing_queue_module), ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]), @@ -186,8 +187,8 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], - [Q#amqqueue.name || Q <- Qs, - gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. + [QName || Q = #amqqueue{name = QName, pid = Pid} <- Qs, + gen_server2:call(Pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 47793920..5ac9c871 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -50,8 +50,8 @@ -opaque(deletions() :: dict()). --spec(recover/2 :: ([rabbit_types:exchange()], [rabbit_types:amqqueue()]) -> - [rabbit_types:binding()]). +-spec(recover/2 :: ([rabbit_types:resource()], [rabbit_types:resource()]) -> + 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -94,27 +94,38 @@ destination_name, destination_kind, routing_key, arguments]). -recover(XsL, QsL) -> - Xs = sets:from_list(XsL), - Qs = sets:from_list(QsL), - rabbit_misc:table_fold( - fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, Xs, Qs) of - true -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, write), - [B | Acc]; - false -> Acc - end - end, [], rabbit_durable_route). +recover(XNames, QNames) -> + XNameSet = sets:from_list(XNames), + QNameSet = sets:from_list(QNames), + XBs = rabbit_misc:table_fold( + fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, + write), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), rabbit_durable_route), + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (XName, Bindings) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bindings]) + end, XBs) + end), + ok. 
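%% Sketch (not part of the patch) of the two-phase callback pattern recover/2
%% now leans on: the second argument to rabbit_misc:execute_mnesia_transaction/2
%% is invoked with the transaction's result both inside the transaction
%% (flag = true) and again once it has committed (flag = false), which is how
%% the exchange-type callbacks learn whether they are running transactionally.
%% A contrived use (in_tx_hook / post_commit_hook are placeholders):

example_pre_post_commit() ->
    rabbit_misc:execute_mnesia_transaction(
      fun () -> ok end,                  %% transactional work goes here
      fun (ok, true)  -> in_tx_hook;     %% still inside the transaction
          (ok, false) -> post_commit_hook
      end).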
-should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, - XNames, QNames) -> +should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, + XNameSet, QNameSet) -> case mnesia:read({rabbit_route, B}) of - [] -> sets:is_element(Dest, case Kind of - exchange -> XNames; - queue -> QNames - end); + [] -> sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end); _ -> false end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index e05a8812..7268b15d 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> 'ok'). +-spec(recover/0 :: () -> [rabbit_types:resource()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), @@ -83,14 +83,20 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - [XName | Acc]; - [_] -> Acc - end - end, [], rabbit_durable_exchange). + Xs = rabbit_misc:table_fold( + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc + end + end, [], rabbit_durable_exchange), + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] + end), + [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> apply(type_to_module(XType), Fun, Args). @@ -120,7 +126,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - ok = (type_to_module(Type)):start(Tx, Exchange, []), + ok = (type_to_module(Type)):create(Tx, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index ad08eb86..0fede0be 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -27,7 +27,7 @@ behaviour_info(callbacks) -> {validate, 1}, %% called after declaration and recovery - {start, 3}, + {create, 2}, %% called after exchange (auto)deletion. {delete, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 1658c9f8..200c2997 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -40,7 +40,7 @@ route(#exchange{name = Name}, rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 83afdd71..62568949 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). 
--export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -39,7 +39,7 @@ route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 0fe8404f..258e785a 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -113,7 +113,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 52f468ee..efa5fb52 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -48,12 +48,7 @@ route(#exchange{name = X}, validate(_X) -> ok. -start(true, _X, Bs) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end); -start(false, _X, _Bs) -> +create(_Tx, _X) -> ok. delete(true, #exchange{name = X}, _Bs) -> -- cgit v1.2.1 From a958149498eb8822bfec1c21414bf64c7abe8517 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 16:55:45 +0100 Subject: Fix tests --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 89d0d162..c8ef4105 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - rabbit_amqqueue:start(), + rabbit_amqqueue:recover(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From 1e9dfa0147c3e4526afdf5ceff58e3ee8ea293ed Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 17:04:40 +0100 Subject: Fix tests again. 
--- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index c8ef4105..995b84d9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -595,7 +595,7 @@ test_topic_matching() -> auto_delete = false, arguments = []}, %% create rabbit_exchange_type_topic:validate(X), - exchange_op_callback(X, start, [[]]), + exchange_op_callback(X, create, []), %% add some bindings Bindings = [#binding{source = XName, -- cgit v1.2.1 From 0cde3dca66d24578464ac64b233737962232b6db Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 12:10:06 +0100 Subject: Describe the problem. That's half the battle, right? --- src/rabbit_ssl.erl | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 4 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index 1953b6b8..e03f8d10 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -89,8 +89,10 @@ find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} <- lists:flatten(RDNs), T == Type] of - [{printableString, S}] -> S; - [] -> not_found + [{ST, S}] when ST =:= teletexString; ST =:= printableString; + ST =:= universalString; ST =:= utf8String; + ST =:= bmpString -> format_directory_string(ST, S); + [] -> not_found end. %%-------------------------------------------------------------------------- @@ -162,8 +164,8 @@ escape_rdn_value([C | S], middle) -> format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; ST =:= universalString; ST =:= utf8String; ST =:= bmpString -> - if is_binary(S) -> binary_to_list(S); - true -> S + if is_binary(S) -> format_directory_string(ST, binary_to_list(S)); + true -> format_directory_string(ST, S) end; format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]}) -> @@ -171,3 +173,56 @@ format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]); format_asn1_value(V) -> io_lib:format("~p", [V]). + +%% DirectoryString { INTEGER : maxSize } ::= CHOICE { +%% teletexString TeletexString (SIZE (1..maxSize)), +%% printableString PrintableString (SIZE (1..maxSize)), +%% bmpString BMPString (SIZE (1..maxSize)), +%% universalString UniversalString (SIZE (1..maxSize)), +%% uTF8String UTF8String (SIZE (1..maxSize)) } +%% +%% Precise definitions of printable / teletexString are hard to come +%% by. This is what I reconstructed: +%% +%% printableString: +%% "intended to represent the limited character sets available to +%% mainframe input terminals" +%% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx +%% +%% teletexString: +%% "a sizable volume of software in the world treats TeletexString +%% (T61String) as a simple 8-bit string with mostly Windows Latin 1 +%% (superset of iso-8859-1) encoding" +%% http://www.mail-archive.com/asn1@asn1.org/msg00460.html +%% (however according to that link X.680 actually defines +%% TeletexString in some much more invovled and crazy way. I suggest +%% we treat it as Windows CP1252). +%% +%% bmpString: +%% UCS-2 according to RFC 3641. Hence cannot represent unicode characters +%% above 65535. +%% +%% universalString: +%% UCS-4 according to RFC 3641. +%% +%% utf8String: +%% UTF-8 according to RFC 3641. +%% +%% Within Rabbit we assume UTF-8 encoding. Since printableString is a +%% subset of ASCII it is also a subset of UTF-8. The others need +%% converting. 
+%% +%% Note for testing: the default Ubuntu configuration for openssl will +%% only create printableString or teletexString types no matter what +%% you do. Edit string_mask in the [req] section of +%% /etc/ssl/openssl.cnf to change this (see comments there). You +%% probably also need to set utf8 = yes to get it to accept UTF-8 on +%% the command line. +%% +%% TODO actually convert stuff here. + +format_directory_string(printableString, S) -> S; +format_directory_string(teletexString, S) -> S; +format_directory_string(bmpString, S) -> S; +format_directory_string(universalString, S) -> S; +format_directory_string(utf8String, S) -> S. -- cgit v1.2.1 From c2eb57a92430c598db99dab8aea83b30ebee9488 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 12:11:48 +0100 Subject: Tweak comment after rereading it. --- src/rabbit_ssl.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index e03f8d10..821dde99 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -187,6 +187,7 @@ format_asn1_value(V) -> %% printableString: %% "intended to represent the limited character sets available to %% mainframe input terminals" +%% A-Z a-z 0-9 ' ( ) + , - . / : = ? [space] %% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx %% %% teletexString: @@ -194,8 +195,9 @@ format_asn1_value(V) -> %% (T61String) as a simple 8-bit string with mostly Windows Latin 1 %% (superset of iso-8859-1) encoding" %% http://www.mail-archive.com/asn1@asn1.org/msg00460.html -%% (however according to that link X.680 actually defines -%% TeletexString in some much more invovled and crazy way. I suggest +%% +%% (However according to that link X.680 actually defines +%% TeletexString in some much more involved and crazy way. I suggest %% we treat it as Windows CP1252). %% %% bmpString: -- cgit v1.2.1 From 487bfdc364ba842c1591bb80b16e7fe0669c9e02 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 4 Apr 2011 13:38:41 +0100 Subject: Warn about config only if new config is absent --- scripts/rabbitmq-env | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env index 3e173949..a2ef8d3c 100755 --- a/scripts/rabbitmq-env +++ b/scripts/rabbitmq-env @@ -37,7 +37,8 @@ RABBITMQ_HOME="${SCRIPT_DIR}/.." NODENAME=rabbit@${HOSTNAME%%.*} # Load configuration from the rabbitmq.conf file -if [ -f /etc/rabbitmq/rabbitmq.conf ]; then +if [ -f /etc/rabbitmq/rabbitmq.conf ] && \ + [ ! -f /etc/rabbitmq/rabbitmq-env.conf ] ; then echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- " echo "location has moved to /etc/rabbitmq/rabbitmq-env.conf" fi -- cgit v1.2.1 From 27a5c6a996bd6b9ea3f9aa39a7868c0af15eb2e6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 14:07:34 +0100 Subject: Actually DTRT. Note that this knocks out the is_binary/1 check introduced in 22bf9ebcaebf; however despite that changeset's comment this does not seem to be needed with R14B (or any other release I tested). 
--- src/rabbit_ssl.erl | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index 821dde99..cd0d1a92 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -164,9 +164,7 @@ escape_rdn_value([C | S], middle) -> format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; ST =:= universalString; ST =:= utf8String; ST =:= bmpString -> - if is_binary(S) -> format_directory_string(ST, binary_to_list(S)); - true -> format_directory_string(ST, S) - end; + format_directory_string(ST, S); format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]}) -> io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ", @@ -198,11 +196,12 @@ format_asn1_value(V) -> %% %% (However according to that link X.680 actually defines %% TeletexString in some much more involved and crazy way. I suggest -%% we treat it as Windows CP1252). +%% we treat it as ISO-8859-1 since Erlang does not support Windows +%% Latin 1). %% %% bmpString: -%% UCS-2 according to RFC 3641. Hence cannot represent unicode characters -%% above 65535. +%% UCS-2 according to RFC 3641. Hence cannot represent Unicode +%% characters above 65535 (outside the "Basic Multilingual Plane"). %% %% universalString: %% UCS-4 according to RFC 3641. @@ -212,19 +211,37 @@ format_asn1_value(V) -> %% %% Within Rabbit we assume UTF-8 encoding. Since printableString is a %% subset of ASCII it is also a subset of UTF-8. The others need -%% converting. +%% converting. Fortunately since the Erlang SSL library does the +%% decoding for us (albeit into a weird format, see below), we just +%% need to handle encoding into UTF-8. %% %% Note for testing: the default Ubuntu configuration for openssl will %% only create printableString or teletexString types no matter what %% you do. Edit string_mask in the [req] section of %% /etc/ssl/openssl.cnf to change this (see comments there). You %% probably also need to set utf8 = yes to get it to accept UTF-8 on -%% the command line. -%% -%% TODO actually convert stuff here. +%% the command line. Also note I could not get openssl to generate a +%% universalString. format_directory_string(printableString, S) -> S; -format_directory_string(teletexString, S) -> S; -format_directory_string(bmpString, S) -> S; -format_directory_string(universalString, S) -> S; +format_directory_string(teletexString, S) -> utf8_list_from(S); +format_directory_string(bmpString, S) -> utf8_list_from(S); +format_directory_string(universalString, S) -> utf8_list_from(S); format_directory_string(utf8String, S) -> S. + +utf8_list_from(S) -> + binary_to_list( + unicode:characters_to_binary(flatten_ssl_list(S), utf32, utf8)). + +%% The Erlang SSL implementation invents its own representation for +%% non-ascii strings - looking like [97,{0,0,3,187}] (that's LATIN +%% SMALL LETTER A followed by GREEK SMALL LETTER LAMDA). We convert +%% this into a list of unicode characters, which we can tell +%% unicode:characters_to_binary is utf32. + +flatten_ssl_list(L) -> [flatten_ssl_list_item(I) || I <- L]. + +flatten_ssl_list_item({A, B, C, D}) -> + A * (1 bsl 24) + B * (1 bsl 16) + C * (1 bsl 8) + D; +flatten_ssl_list_item(N) when is_number (N) -> + N. -- cgit v1.2.1 From 2b965cec2671c62be3994bfb013e6eec0a7caac2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 14:18:26 +0100 Subject: Reduce duplication. 
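%% Worked example (illustrative, not part of the patch): with the conversion
%% above, the decoder output quoted in the comment -- LATIN SMALL LETTER A
%% followed by GREEK SMALL LETTER LAMDA, where {0,0,3,187} is code point
%% 3*256 + 187 = 955 = U+03BB -- comes out as a UTF-8 byte list:
%%
%%   format_directory_string(bmpString, [97, {0,0,3,187}]).
%%   %% => [97,206,187], i.e. the UTF-8 encoding of "aλ"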
--- src/rabbit_ssl.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index cd0d1a92..a3cd2b37 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -89,10 +89,8 @@ find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} <- lists:flatten(RDNs), T == Type] of - [{ST, S}] when ST =:= teletexString; ST =:= printableString; - ST =:= universalString; ST =:= utf8String; - ST =:= bmpString -> format_directory_string(ST, S); - [] -> not_found + [Val] -> format_asn1_value(Val); + [] -> not_found end. %%-------------------------------------------------------------------------- -- cgit v1.2.1 From 4776943d229918e892c4a3205671d2fc8b2a150f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:14:28 +0100 Subject: Cosmetics / specs. --- src/rabbit.erl | 4 +--- src/rabbit_amqqueue.erl | 6 +++--- src/rabbit_binding.erl | 2 +- src/rabbit_exchange.erl | 2 +- src/rabbit_exchange_type_direct.erl | 2 +- src/rabbit_exchange_type_topic.erl | 4 +--- src/rabbit_tests.erl | 2 +- 7 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 2840a5b7..07316138 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -460,9 +460,7 @@ boot_delegate() -> rabbit_sup:start_child(delegate_sup, [Count]). recover() -> - XNames = rabbit_exchange:recover(), - QNames = rabbit_amqqueue:recover(), - rabbit_binding:recover(XNames, QNames). + rabbit_binding:recover(rabbit_exchange:recover(), rabbit_amqqueue:start()). maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 34ed88bc..e813d75c 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,7 +16,7 @@ -module(rabbit_amqqueue). --export([recover/0, stop/0, declare/5, delete_immediately/1, delete/3, +-export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, @@ -58,7 +58,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(recover/0 :: () -> [rabbit_types:amqqueue()]). +-spec(start/0 :: () -> [rabbit_amqqueue:name()]). -spec(stop/0 :: () -> 'ok'). -spec(declare/5 :: (name(), boolean(), boolean(), @@ -158,7 +158,7 @@ %%---------------------------------------------------------------------------- -recover() -> +start() -> DurableQueues = find_durable_queues(), {ok, BQ} = application:get_env(rabbit, backing_queue_module), ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 5ac9c871..5f120547 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -50,7 +50,7 @@ -opaque(deletions() :: dict()). --spec(recover/2 :: ([rabbit_types:resource()], [rabbit_types:resource()]) -> +-spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 7268b15d..86ce69ef 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> [rabbit_types:resource()]). +-spec(recover/0 :: () -> [rabbit_exchange:name()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). 
-spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 200c2997..4c56a1f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -42,7 +42,7 @@ route(#exchange{name = Name}, validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index efa5fb52..2c995df8 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -47,9 +47,7 @@ route(#exchange{name = X}, end || RKey <- Routes]). validate(_X) -> ok. - -create(_Tx, _X) -> - ok. +create(_Tx, _X) -> ok. delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 995b84d9..e618156b 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - rabbit_amqqueue:recover(), + rabbit_amqqueue:start(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From dac6e4ba2648131f4d79f871536c3b79e16d17d9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:24:02 +0100 Subject: Split up sync_binding. --- src/rabbit_binding.erl | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 5f120547..508d19bf 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -100,10 +100,8 @@ recover(XNames, QNames) -> XBs = rabbit_misc:table_fold( fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> case should_recover(B, XNameSet, QNameSet) of - true -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, - write), + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), rabbit_misc:dict_cons(Src, B, Acc); false -> Acc end @@ -287,16 +285,17 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -sync_binding(Binding, Durable, Fun) -> - ok = case Durable of - true -> Fun(rabbit_durable_route, - #route{binding = Binding}, write); - false -> ok - end, +sync_binding(Binding, true, Fun) -> + ok = Fun(rabbit_durable_route, #route{binding = Binding}, write), + ok = sync_transient_binding(Binding, Fun); + +sync_binding(Binding, false, Fun) -> + ok = sync_transient_binding(Binding, Fun). + +sync_transient_binding(Binding, Fun) -> {Route, ReverseRoute} = route_with_reverse(Binding), ok = Fun(rabbit_route, Route, write), - ok = Fun(rabbit_reverse_route, ReverseRoute, write), - ok. + ok = Fun(rabbit_reverse_route, ReverseRoute, write). call_with_source_and_destination(SrcName, DstName, Fun) -> SrcTable = table_for_resource(SrcName), -- cgit v1.2.1 From 9f9c7c5eabf3d6915afb0b0b62fe25d04101d50e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:35:51 +0100 Subject: rabbit_misc:execute_pre_post_mnesia_tx/1. 
--- src/rabbit_binding.erl | 5 ++--- src/rabbit_exchange.erl | 5 ++--- src/rabbit_misc.erl | 7 +++++++ 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 508d19bf..8c6732f9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -106,9 +106,8 @@ recover(XNames, QNames) -> false -> Acc end end, dict:new(), rabbit_durable_route), - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 86ce69ef..b39fe32c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,9 +91,8 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] end), [XName || #exchange{name = XName} <- Xs]. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 2e9563cf..8927020f 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -34,6 +34,7 @@ -export([with_user/2, with_user_and_vhost/3]). -export([execute_mnesia_transaction/1]). -export([execute_mnesia_transaction/2]). +-export([execute_pre_post_mnesia_tx/1]). -export([execute_mnesia_tx_with_tail/1]). -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). @@ -135,6 +136,7 @@ -spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A). -spec(execute_mnesia_transaction/2 :: (thunk(A), fun ((A, boolean()) -> B)) -> B). +-spec(execute_pre_post_mnesia_tx/1 :: (fun ((boolean()) -> B)) -> B). -spec(execute_mnesia_tx_with_tail/1 :: (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))). -spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok'). @@ -404,6 +406,11 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> Result end), false). +%% Like the above, but without the main body. +execute_pre_post_mnesia_tx(PrePostCommitFun) -> + execute_mnesia_transaction(fun () -> ok end, + fun (ok, Tx) -> PrePostCommitFun(Tx) end). + %% Like execute_mnesia_transaction/2, but TxFun is expected to return a %% TailFun which gets called immediately before and after the tx commit execute_mnesia_tx_with_tail(TxFun) -> -- cgit v1.2.1 From 95f132f8a4324906a108208c37b720c8e811206d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 17:07:12 +0100 Subject: Invoke all the create and add_binding callbacks in the same tx. 
--- src/rabbit_binding.erl | 4 ++++ src/rabbit_exchange.erl | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8c6732f9..557a8f29 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -108,6 +108,10 @@ recover(XNames, QNames) -> end, dict:new(), rabbit_durable_route), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> + [begin + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, create, [Tx, X]) + end|| XName <- XNames], dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b39fe32c..acbc6c90 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,10 +91,6 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] - end), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> -- cgit v1.2.1 From 797571fdd089c1217c8ff12fbf99990038b4dc3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 11:49:07 +0100 Subject: Remove gratuitous export. --- src/rabbit_prelaunch.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index c8ad7c9c..79deb46c 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -16,7 +16,7 @@ -module(rabbit_prelaunch). --export([start/0, stop/0, duplicate_node_check/1]). +-export([start/0, stop/0]). -define(BaseApps, [rabbit]). -define(ERROR_CODE, 1). -- cgit v1.2.1 From 6bfb13e3561aaf85d2febb60fea3e89f440a3580 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 12:09:13 +0100 Subject: Revert bc5bcde98866 --- src/rabbit_binding.erl | 4 ---- src/rabbit_exchange.erl | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 557a8f29..8c6732f9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -108,10 +108,6 @@ recover(XNames, QNames) -> end, dict:new(), rabbit_durable_route), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> - [begin - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, create, [Tx, X]) - end|| XName <- XNames], dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index acbc6c90..b39fe32c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,6 +91,10 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> + [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] + end), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> -- cgit v1.2.1 From 050dc7df7081db4054191503e6b2dfe2e07ec901 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 12:24:13 +0100 Subject: Recover all the bindings in a single tx. 
--- src/rabbit_binding.erl | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8c6732f9..563fc0cf 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,15 +97,20 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - XBs = rabbit_misc:table_fold( - fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> - case should_recover(B, XNameSet, QNameSet) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc - end - end, dict:new(), rabbit_durable_route), + XBs = rabbit_misc:execute_mnesia_transaction( + fun () -> + lists:foldl( + fun (Route = #route{ + binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), + mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])) + end), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> dict:map(fun (XName, Bindings) -> -- cgit v1.2.1 From b010416a0a2ebf15cae4d4da211486a12f80b2aa Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 13:27:17 +0100 Subject: Recover and invoke callbacks for bindings in the same Tx. --- src/rabbit_binding.erl | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 563fc0cf..7c492778 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -95,32 +95,29 @@ routing_key, arguments]). recover(XNames, QNames) -> - XNameSet = sets:from_list(XNames), - QNameSet = sets:from_list(QNames), XBs = rabbit_misc:execute_mnesia_transaction( fun () -> - lists:foldl( - fun (Route = #route{ - binding = B = #binding{source = Src}}, Acc) -> - case should_recover(B, XNameSet, QNameSet) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc - end - end, dict:new(), - mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])) + XBs = recover_internal(XNames, QNames), + callback_bindings(true, XBs), + XBs end), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - dict:map(fun (XName, Bindings) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bindings]) - end, XBs) - end), + callback_bindings(false, XBs), ok. +recover_internal(XNames, QNames) -> + XNameSet = sets:from_list(XNames), + QNameSet = sets:from_list(QNames), + lists:foldl( + fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), + mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])). + should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> case mnesia:read({rabbit_route, B}) of @@ -131,6 +128,13 @@ should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, _ -> false end. +callback_bindings(Tx, XBs) -> + dict:map(fun (XName, Bindings) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bindings]) + end, XBs). 
+ exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From 8745389ddb413ed3a326f2c4a989f7ad2e7105ce Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 16:57:44 +0100 Subject: UTF8strings come back as binaries. --- src/rabbit_ssl.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index a3cd2b37..e0defa9e 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -211,7 +211,8 @@ format_asn1_value(V) -> %% subset of ASCII it is also a subset of UTF-8. The others need %% converting. Fortunately since the Erlang SSL library does the %% decoding for us (albeit into a weird format, see below), we just -%% need to handle encoding into UTF-8. +%% need to handle encoding into UTF-8. Note also that utf8Strings come +%% back as binary. %% %% Note for testing: the default Ubuntu configuration for openssl will %% only create printableString or teletexString types no matter what @@ -225,7 +226,7 @@ format_directory_string(printableString, S) -> S; format_directory_string(teletexString, S) -> utf8_list_from(S); format_directory_string(bmpString, S) -> utf8_list_from(S); format_directory_string(universalString, S) -> utf8_list_from(S); -format_directory_string(utf8String, S) -> S. +format_directory_string(utf8String, S) -> binary_to_list(S). utf8_list_from(S) -> binary_to_list( -- cgit v1.2.1 From 22bdb4ffd079fb5aa842e1673de888be46029ab0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 6 Apr 2011 11:33:14 +0100 Subject: It is not an error if the file does not exist --- src/rabbit_msg_store.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 65688142..3f4162cd 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1891,7 +1891,10 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), - ok = file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + case file:delete(filename:join(Dir, ?CLEAN_FILENAME)) of + ok -> ok; + {error, enoent} -> ok + end, recover_crashed_compactions(BaseDir), ok. -- cgit v1.2.1 From f2d3b189ad34e01e80841a48552622577cb069f8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:01:46 +0100 Subject: Split back up into lots of little txs. --- src/rabbit_binding.erl | 49 ++++++++++++++++++------------------------------- src/rabbit_exchange.erl | 11 ++++++----- src/rabbit_misc.erl | 12 +++++++----- 3 files changed, 31 insertions(+), 41 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7c492778..c71a21f1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -95,45 +95,32 @@ routing_key, arguments]). recover(XNames, QNames) -> - XBs = rabbit_misc:execute_mnesia_transaction( - fun () -> - XBs = recover_internal(XNames, QNames), - callback_bindings(true, XBs), - XBs - end), - callback_bindings(false, XBs), - ok. 
- -recover_internal(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - lists:foldl( - fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + rabbit_misc:table_fold( + fun (Route = #route{binding = B}, _Acc) -> case should_recover(B, XNameSet, QNameSet) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc + B; + false -> none end - end, dict:new(), - mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])). + end, + fun (none, _Tx) -> + ok; + (B = #binding{source = Src}, Tx) -> + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + end, + none, rabbit_durable_route), + ok. -should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, +should_recover(#binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> - case mnesia:read({rabbit_route, B}) of - [] -> sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end); - _ -> false - end. - -callback_bindings(Tx, XBs) -> - dict:map(fun (XName, Bindings) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bindings]) - end, XBs). + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end). exists(Binding) -> binding_action( diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b39fe32c..bc2d5b29 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -90,11 +90,12 @@ recover() -> [X | Acc]; [_] -> Acc end - end, [], rabbit_durable_exchange), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] - end), + end, + fun (Acc = [X | _], Tx) -> + rabbit_exchange:callback(X, create, [Tx, X]), + Acc + end, + [], rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 8927020f..28c4596e 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -39,7 +39,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_fold/3]). +-export([table_fold/4]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -148,7 +148,8 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_fold/3 :: (fun ((any(), A) -> A), A, atom()) -> A). +-spec(table_fold/4 :: (fun ((any(), A) -> A), fun ((A, boolean()) -> A), A, + atom()) -> A). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -473,14 +474,15 @@ map_in_order(F, L) -> %% around the lot. %% %% We ignore entries that have been modified or removed. -table_fold(F, Acc0, TableName) -> +table_fold(Fun, PrePostCommitFun, Acc0, TableName) -> lists:foldl( fun (E, Acc) -> execute_mnesia_transaction( fun () -> case mnesia:match_object(TableName, E, read) of [] -> Acc; - _ -> F(E, Acc) + _ -> Fun(E, Acc) end - end) + end, + PrePostCommitFun) end, Acc0, dirty_read_all(TableName)). 
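The table_fold/4 shape above is the pattern the rest of these commits keep refining: take a dirty snapshot of the table, then revisit each row in its own short transaction, re-check with mnesia:match_object that the row still exists, and hand an in-transaction flag to a pre/post-commit callback. Below is a stripped-down sketch of the same shape, independent of the RabbitMQ helpers; the module name, fold_per_row/3 and its callback signatures are assumptions for illustration, not the rabbit_misc API.

    %% Visit every row of Tab in its own transaction rather than one big one.
    %% Rows that vanished since the dirty snapshot are skipped; Fun(Row) runs
    %% inside the transaction, PostCommit(Row) runs after it has committed.
    -module(per_row_tx_sketch).
    -export([fold_per_row/3]).

    fold_per_row(Fun, PostCommit, Tab) ->
        Rows = mnesia:dirty_select(Tab, [{'$1', [], ['$1']}]),
        lists:foldl(
          fun (Row, Acc) ->
                  case mnesia:transaction(
                         fun () ->
                                 case mnesia:match_object(Tab, Row, read) of
                                     [] -> not_found;  %% modified or removed
                                     _  -> {found, Fun(Row)}
                                 end
                         end) of
                      {atomic, not_found}    -> Acc;
                      {atomic, {found, Res}} -> PostCommit(Row),
                                                [{Row, Res} | Acc];
                      {aborted, Reason}      -> erlang:error({aborted, Reason})
                  end
          end, [], Rows).

The gain over one big transaction is the one the rabbit_misc comment itself claims: each row's work is short, so locks are held only briefly.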
dirty_read_all(TableName) -> -- cgit v1.2.1 From dc5425baa1fbb1d32bbb08ad0d72f9e3788fbdc9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:04:07 +0100 Subject: No longer needed. --- src/rabbit_misc.erl | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 28c4596e..324ec534 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -34,7 +34,6 @@ -export([with_user/2, with_user_and_vhost/3]). -export([execute_mnesia_transaction/1]). -export([execute_mnesia_transaction/2]). --export([execute_pre_post_mnesia_tx/1]). -export([execute_mnesia_tx_with_tail/1]). -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). @@ -136,7 +135,6 @@ -spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A). -spec(execute_mnesia_transaction/2 :: (thunk(A), fun ((A, boolean()) -> B)) -> B). --spec(execute_pre_post_mnesia_tx/1 :: (fun ((boolean()) -> B)) -> B). -spec(execute_mnesia_tx_with_tail/1 :: (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))). -spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok'). @@ -407,11 +405,6 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> Result end), false). -%% Like the above, but without the main body. -execute_pre_post_mnesia_tx(PrePostCommitFun) -> - execute_mnesia_transaction(fun () -> ok end, - fun (ok, Tx) -> PrePostCommitFun(Tx) end). - %% Like execute_mnesia_transaction/2, but TxFun is expected to return a %% TailFun which gets called immediately before and after the tx commit execute_mnesia_tx_with_tail(TxFun) -> -- cgit v1.2.1 From 6f9e91a2d0d38a1bcd680acbb4e2782d975c6a5d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:17:12 +0100 Subject: Check for presence of binding in rabbit_durable_route on add. --- src/rabbit_binding.erl | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c71a21f1..d293c812 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -142,14 +142,11 @@ add(Binding, InnerFun) -> case InnerFun(Src, Dst) of ok -> case mnesia:read({rabbit_route, B}) of - [] -> ok = sync_binding(B, all_durable([Src, Dst]), - fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_bindings, - [Tx, Src, [B]]), - rabbit_event:notify_if( - not Tx, binding_created, info(B)) + [] -> case mnesia:read({rabbit_durable_route, B}) of + [] -> add_internal(Src, Dst, B); + %% Binding exists, to queue on node which + %% is in the middle of starting + [_] -> rabbit_misc:const(not_found) end; [_] -> fun rabbit_misc:const_ok/1 end; @@ -158,6 +155,13 @@ add(Binding, InnerFun) -> end end). +add_internal(Src, Dst, B) -> + ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback(Src, add_bindings, [Tx, Src, [B]]), + rabbit_event:notify_if(not Tx, binding_created, info(B)) + end. + remove(Binding, InnerFun) -> binding_action( Binding, -- cgit v1.2.1 From 4002f7243e0f392d6ceba4f779732f160321179e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:23:04 +0100 Subject: Check the route still exists. --- src/rabbit_binding.erl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index d293c812..7131ab21 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -115,12 +115,15 @@ recover(XNames, QNames) -> none, rabbit_durable_route), ok. 
-should_recover(#binding{destination = Dst = #resource{ kind = Kind }}, +should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end). + case mnesia:read({rabbit_durable_route, B}) of + [] -> false; %% It disappeared between getting the list and here + [_] -> sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) + end. exists(Binding) -> binding_action( -- cgit v1.2.1 From a5b4048d515d9e2b9fad6ed45785f1bfb7c60d1c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:36:58 +0100 Subject: Refactor a bit, and only check rabbit_durable_route if the binding might be durable. --- src/rabbit_binding.erl | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7131ab21..2ae7c973 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -143,26 +143,30 @@ add(Binding, InnerFun) -> %% in general, we want to fail on that in preference to %% anything else case InnerFun(Src, Dst) of - ok -> - case mnesia:read({rabbit_route, B}) of - [] -> case mnesia:read({rabbit_durable_route, B}) of - [] -> add_internal(Src, Dst, B); - %% Binding exists, to queue on node which - %% is in the middle of starting - [_] -> rabbit_misc:const(not_found) - end; - [_] -> fun rabbit_misc:const_ok/1 - end; - {error, _} = Err -> - rabbit_misc:const(Err) + ok -> add(Src, Dst, B); + {error, _} = Err -> rabbit_misc:const(Err) end end). -add_internal(Src, Dst, B) -> - ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback(Src, add_bindings, [Tx, Src, [B]]), - rabbit_event:notify_if(not Tx, binding_created, info(B)) +add(Src, Dst, B) -> + case mnesia:read({rabbit_route, B}) of + [] -> Durable = all_durable([Src, Dst]), + case (not Durable orelse + mnesia:read({rabbit_durable_route, B}) =:= []) of + true -> + ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback(Src, add_bindings, + [Tx, Src, [B]]), + rabbit_event:notify_if(not Tx, binding_created, + info(B)) + end; + %% Binding exists, to queue on node which + %% is in the middle of starting + false -> + rabbit_misc:const(not_found) + end; + [_] -> fun rabbit_misc:const_ok/1 end. remove(Binding, InnerFun) -> -- cgit v1.2.1 From 5689000c5a62bcb866e352f6f5497a38350b1ffc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:44:56 +0100 Subject: Matthias prefers this. 
--- src/rabbit_binding.erl | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 2ae7c973..e02427bc 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -98,8 +98,16 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), rabbit_misc:table_fold( - fun (Route = #route{binding = B}, _Acc) -> - case should_recover(B, XNameSet, QNameSet) of + fun (Route = #route{binding = B = + #binding{destination = Dst = + #resource{kind = Kind}}}, _Acc) -> + %% The check against rabbit_durable_route is in case it + %% disappeared between getting the list and here + case (not mnesia:read({rabbit_durable_route, B}) =:= [] andalso + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end)) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), B; @@ -115,16 +123,6 @@ recover(XNames, QNames) -> none, rabbit_durable_route), ok. -should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, - XNameSet, QNameSet) -> - case mnesia:read({rabbit_durable_route, B}) of - [] -> false; %% It disappeared between getting the list and here - [_] -> sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) - end. - exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From d5248d1d98a27a5ee030157efee4cffa623e3388 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:48:04 +0100 Subject: Acc may be empty. --- src/rabbit_exchange.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index bc2d5b29..a2684782 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,7 +91,9 @@ recover() -> [_] -> Acc end end, - fun (Acc = [X | _], Tx) -> + fun ([], _Tx) -> + []; + (Acc = [X | _], Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), Acc end, -- cgit v1.2.1 From 3aa8f702f49d253526b9571cc2a63bca0a0ee516 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:59:54 +0100 Subject: Damn priorities. 
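"Damn priorities" is about Erlang operator precedence: unary not binds more tightly than =:=, so not Expr =:= [] parses as (not Expr) =:= [] and raises badarg the moment Expr is a list, which is why the hunk below adds parentheses. A two-line illustration (module and function names invented):

    %% "not X =:= []" is "(not X) =:= []": badarg for any list X.
    %% The parenthesised form is what the hunk below switches to.
    -module(priorities_sketch).
    -export([unparenthesised/1, parenthesised/1]).

    unparenthesised(X) -> not X =:= [].    %% crashes with badarg when X is a list
    parenthesised(X)   -> not (X =:= []).  %% true whenever X is not []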
--- src/rabbit_binding.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index e02427bc..b7bebc39 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -103,7 +103,7 @@ recover(XNames, QNames) -> #resource{kind = Kind}}}, _Acc) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case (not mnesia:read({rabbit_durable_route, B}) =:= [] andalso + case (not (mnesia:read({rabbit_durable_route, B}) =:= []) andalso sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet -- cgit v1.2.1 From 03677a7ac3c173a83f5e29123d7cafde96cb4ef6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 17:08:54 +0100 Subject: table_fold -> table_map --- src/rabbit_binding.erl | 21 +++++++++++---------- src/rabbit_exchange.erl | 18 ++++++++---------- src/rabbit_misc.erl | 27 +++++++++++++++------------ 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b7bebc39..b2d84143 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,17 +97,17 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - rabbit_misc:table_fold( + rabbit_misc:table_map( fun (Route = #route{binding = B = #binding{destination = Dst = - #resource{kind = Kind}}}, _Acc) -> + #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case (not (mnesia:read({rabbit_durable_route, B}) =:= []) andalso - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end)) of + case mnesia:read({rabbit_durable_route, B}) =/= [] andalso + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), B; @@ -115,12 +115,13 @@ recover(XNames, QNames) -> end end, fun (none, _Tx) -> - ok; + none; (B = #binding{source = Src}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), + B end, - none, rabbit_durable_route), + rabbit_durable_route), ok. exists(Binding) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a2684782..2fe98e4b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,21 +83,19 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> + Xs = rabbit_misc:table_map( + fun (X = #exchange{name = XName}) -> case mnesia:read({rabbit_exchange, XName}) of [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; - [_] -> Acc + X; + [_] -> none end end, - fun ([], _Tx) -> - []; - (Acc = [X | _], Tx) -> - rabbit_exchange:callback(X, create, [Tx, X]), - Acc + fun (none, _Tx) -> none; + (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), + X end, - [], rabbit_durable_exchange), + rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 324ec534..6bebf005 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -38,7 +38,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_fold/4]). +-export([table_map/3]). 
-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -146,8 +146,7 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_fold/4 :: (fun ((any(), A) -> A), fun ((A, boolean()) -> A), A, - atom()) -> A). +-spec(table_map/3 :: (fun ((A) -> A), fun ((A, boolean()) -> A), atom()) -> A). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -467,16 +466,20 @@ map_in_order(F, L) -> %% around the lot. %% %% We ignore entries that have been modified or removed. -table_fold(Fun, PrePostCommitFun, Acc0, TableName) -> +table_map(Fun, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, read) of - [] -> Acc; - _ -> Fun(E, Acc) - end - end, - PrePostCommitFun) - end, Acc0, dirty_read_all(TableName)). + fun (E, Acc) -> case execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> Acc; + _ -> Fun(E) + end + end, + PrePostCommitFun) of + none -> Acc; + Res -> [Res | Acc] + end + end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> mnesia:dirty_select(TableName, [{'$1',[],['$1']}]). -- cgit v1.2.1 From e5a47db3cf448c377ba067ad3b2ab0dcd0b42d52 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 15:40:14 +0100 Subject: Reinstate trap_exit in channel. --- src/rabbit_channel.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5099bf3f..0c12614c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -156,6 +156,7 @@ ready_for_close(Pid) -> init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> + process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), State = #ch{state = starting, -- cgit v1.2.1 From f468d313fb5f4e8ba8c25cae1670c2fa9c56930c Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 7 Apr 2011 17:28:22 +0100 Subject: changelog entries for 2.4.1 --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 45af770a..f9e9df8b 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -120,6 +120,9 @@ done rm -rf %{buildroot} %changelog +* Thu Apr 7 2011 Alexandru Scvortov 2.4.1-1 +- New Upstream Release + * Tue Mar 22 2011 Alexandru Scvortov 2.4.0-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index 2ca5074f..0383b955 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (2.4.1-1) lucid; urgency=low + + * New Upstream Release + + -- Alexandru Scvortov Thu, 07 Apr 2011 16:49:22 +0100 + rabbitmq-server (2.4.0-1) lucid; urgency=low * New Upstream Release -- cgit v1.2.1 -- cgit v1.2.1 From 34c34d59585af7ae7e51e30301765a5eadff7609 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 00:24:02 +0100 Subject: cosmetic --- src/rabbit_amqqueue.erl | 5 ++--- src/rabbit_binding.erl | 20 
++++++++------------ src/rabbit_exchange.erl | 2 +- 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e813d75c..77d3841b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,8 +16,7 @@ -module(rabbit_amqqueue). --export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, - purge/1]). +-export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, @@ -58,7 +57,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> [rabbit_amqqueue:name()]). +-spec(start/0 :: () -> [name()]). -spec(stop/0 :: () -> 'ok'). -spec(declare/5 :: (name(), boolean(), boolean(), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b2d84143..8633ed13 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -152,18 +152,14 @@ add(Src, Dst, B) -> [] -> Durable = all_durable([Src, Dst]), case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> - ok = sync_binding(B, Durable, fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback(Src, add_bindings, - [Tx, Src, [B]]), - rabbit_event:notify_if(not Tx, binding_created, - info(B)) - end; - %% Binding exists, to queue on node which - %% is in the middle of starting - false -> - rabbit_misc:const(not_found) + true -> ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback( + Src, add_bindings, [Tx, Src, [B]]), + rabbit_event:notify_if( + not Tx, binding_created, info(B)) + end; + false -> rabbit_misc:const(not_found) end; [_] -> fun rabbit_misc:const_ok/1 end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 2fe98e4b..623adf0b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> [rabbit_exchange:name()]). +-spec(recover/0 :: () -> [name()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), -- cgit v1.2.1 From 3e13f9831edcd4ffc35925b8230b8e4e3b5fd3eb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 11:06:22 +0100 Subject: misc:table_map -> misc:table_filter. 
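The one-line rabbit_channel change above reinstates process_flag(trap_exit, true) in init/1. For a gen_server that links to other processes, trapping exits turns a linked crash into an {'EXIT', Pid, Reason} message handled in handle_info/2 and lets terminate/2 run on shutdown instead of the process being killed outright. A minimal sketch of that setup, not RabbitMQ code:

    %% Minimal gen_server that traps exits: linked-process failures arrive as
    %% {'EXIT', Pid, Reason} messages and terminate/2 is called on shutdown.
    -module(trap_exit_sketch).
    -behaviour(gen_server).

    -export([start_link/0]).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).

    start_link() -> gen_server:start_link(?MODULE, [], []).

    init([]) ->
        process_flag(trap_exit, true),
        {ok, undefined}.

    handle_call(_Req, _From, State) -> {reply, ok, State}.
    handle_cast(_Msg, State)        -> {noreply, State}.

    handle_info({'EXIT', Pid, Reason}, State) ->
        %% a linked process went down; clean up or stop as appropriate
        error_logger:info_msg("linked process ~p exited: ~p~n", [Pid, Reason]),
        {noreply, State};
    handle_info(_Other, State) ->
        {noreply, State}.

    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.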
--- src/rabbit_binding.erl | 25 ++++++++++--------------- src/rabbit_exchange.erl | 19 +++++++++---------- src/rabbit_misc.erl | 31 ++++++++++++++++--------------- 3 files changed, 35 insertions(+), 40 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8633ed13..ec64c474 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,29 +97,24 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - rabbit_misc:table_map( - fun (Route = #route{binding = B = - #binding{destination = Dst = - #resource{kind = Kind}}}) -> + rabbit_misc:table_filter( + fun (#route{binding = B = #binding{destination = Dst = + #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case mnesia:read({rabbit_durable_route, B}) =/= [] andalso + mnesia:read({rabbit_durable_route, B}) =/= [] andalso sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet - end) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - B; - false -> none - end + end) end, - fun (none, _Tx) -> - none; - (B = #binding{source = Src}, Tx) -> + fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), - B + case Tx of + true -> ok = sync_transient_binding(R, fun mnesia:write/3); + false -> ok + end end, rabbit_durable_route), ok. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 623adf0b..3e4edba4 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,17 +83,16 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_map( - fun (X = #exchange{name = XName}) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - X; - [_] -> none - end + Xs = rabbit_misc:table_filter( + fun (#exchange{name = XName}) -> + mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (none, _Tx) -> none; - (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), - X + fun (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), + case Tx of + true -> ok = mnesia:write(rabbit_exchange, + X, write); + false -> ok + end end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 6bebf005..adc3ae66 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -38,7 +38,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_map/3]). +-export([table_filter/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -146,7 +146,8 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_map/3 :: (fun ((A) -> A), fun ((A, boolean()) -> A), atom()) -> A). +-spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'), + atom()) -> [A]). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -461,24 +462,24 @@ map_in_order(F, L) -> lists:reverse( lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)). 
-%% Fold over each entry in a table, executing the cons function in a +%% Fold over each entry in a table, executing the pre-post-commit function in a %% transaction. This is often far more efficient than wrapping a tx %% around the lot. %% %% We ignore entries that have been modified or removed. -table_map(Fun, PrePostCommitFun, TableName) -> +table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> case execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> Acc; - _ -> Fun(E) - end - end, - PrePostCommitFun) of - none -> Acc; - Res -> [Res | Acc] - end + fun (E, Acc) -> execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> false; + _ -> Pred(E) + end + end, + fun (false, _Tx) -> Acc; + (true, Tx) -> PrePostCommitFun(E, Tx), + [E | Acc] + end) end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From 1656e6f9a5083f56ad69082b55dd72f6fddf8a7c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 11:41:08 +0100 Subject: Callbacks should come after mnesia writes. --- src/rabbit_binding.erl | 4 ++-- src/rabbit_exchange.erl | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index ec64c474..611f7909 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -110,11 +110,11 @@ recover(XNames, QNames) -> end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), case Tx of true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok - end + end, + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) end, rabbit_durable_route), ok. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 3e4edba4..a74f9d28 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,12 +87,12 @@ recover() -> fun (#exchange{name = XName}) -> mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), - case Tx of + fun (X, Tx) -> case Tx of true -> ok = mnesia:write(rabbit_exchange, X, write); false -> ok - end + end, + rabbit_exchange:callback(X, create, [Tx, X]) end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. -- cgit v1.2.1 From 77784cf6bd022e7a83bd8637d459b33949dca618 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:10:17 +0100 Subject: Go back to add_binding. --- include/rabbit_exchange_type_spec.hrl | 4 ++-- src/rabbit_binding.erl | 4 ++-- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_direct.erl | 4 ++-- src/rabbit_exchange_type_fanout.erl | 4 ++-- src/rabbit_exchange_type_headers.erl | 4 ++-- src/rabbit_exchange_type_topic.erl | 10 ++++------ src/rabbit_tests.erl | 2 +- 8 files changed, 16 insertions(+), 18 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index fd3ddf7e..c80cc196 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -23,8 +23,8 @@ -spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), + rabbit_types:binding()) -> 'ok'). 
-spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(assert_args_equivalence/2 :: diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 611f7909..0fb0baf3 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -114,7 +114,7 @@ recover(XNames, QNames) -> true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok end, - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + rabbit_exchange:callback(X, add_binding, [Tx, X, B]) end, rabbit_durable_route), ok. @@ -150,7 +150,7 @@ add(Src, Dst, B) -> true -> ok = sync_binding(B, Durable, fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback( - Src, add_bindings, [Tx, Src, [B]]), + Src, add_binding, [Tx, Src, B]), rabbit_event:notify_if( not Tx, binding_created, info(B)) end; diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 0fede0be..b2400098 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -33,7 +33,7 @@ behaviour_info(callbacks) -> {delete, 3}, %% called after a binding has been added or bindings have been recovered - {add_bindings, 3}, + {add_binding, 3}, %% called after bindings have been deleted. {remove_bindings, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 4c56a1f8..40078b1a 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -21,7 +21,7 @@ -export([description/0, route/2]). -export([validate/1, create/2, delete/3, - add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). + add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). -rabbit_boot_step({?MODULE, @@ -42,7 +42,7 @@ route(#exchange{name = Name}, validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 62568949..f32ef917 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -41,7 +41,7 @@ route(#exchange{name = Name}, _Delivery) -> validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 258e785a..139feb04 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -115,7 +115,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. 
-add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2c995df8..cdc95226 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -56,12 +56,10 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_bindings(true, _X, Bs) -> +add_binding(true, _X, B) -> rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end); -add_bindings(false, _X, _Bs) -> + fun () -> internal_add_binding(B) end); +add_binding(false, _X, _B) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index e618156b..c029412d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -629,7 +629,7 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]], - lists:foreach(fun (B) -> exchange_op_callback(X, add_bindings, [[B]]) end, + lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), %% test some matches -- cgit v1.2.1 From 13b5acf1f9bab327405c73939cf7cb11df97530e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:13:34 +0100 Subject: Improve comment, minimise difference from default. --- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_topic.erl | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index b2400098..cd96407c 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -32,7 +32,7 @@ behaviour_info(callbacks) -> %% called after exchange (auto)deletion. {delete, 3}, - %% called after a binding has been added or bindings have been recovered + %% called after a binding has been added or recovered {add_binding, 3}, %% called after bindings have been deleted. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index cdc95226..5cec5b41 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -56,10 +56,9 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _X, B) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> internal_add_binding(B) end); -add_binding(false, _X, _B) -> +add_binding(true, _Exchange, Binding) -> + internal_add_binding(Binding); +add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> -- cgit v1.2.1 From b3eb94ab9da72a897860109e80591b10e3fa4f08 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:49:52 +0100 Subject: Cut down on reads. 
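The add_bindings -> add_binding rename above touches every exchange type because they all implement the rabbit_exchange_type behaviour. Pulling together the callbacks visible in these hunks, a do-nothing exchange type now looks roughly like the sketch below; it is modelled on the direct and fanout modules in the diff, but the module name, the description proplist contents and the omission of the boot-step registration are all simplifications.

    %% Skeleton of a rabbit_exchange_type implementation after the rename:
    %% add_binding/3 is called once per binding, remove_bindings/3 still
    %% receives the whole list.
    -module(rabbit_exchange_type_noop_sketch).
    -behaviour(rabbit_exchange_type).

    -export([description/0, route/2]).
    -export([validate/1, create/2, delete/3, add_binding/3,
             remove_bindings/3, assert_args_equivalence/2]).

    description() -> [{description, <<"no-op exchange type (sketch)">>}].

    route(_Exchange, _Delivery) -> [].            %% routes to no destinations

    validate(_X)                  -> ok.
    create(_Tx, _X)               -> ok.
    delete(_Tx, _X, _Bs)          -> ok.
    add_binding(_Tx, _X, _B)      -> ok.
    remove_bindings(_Tx, _X, _Bs) -> ok.
    assert_args_equivalence(X, Args) ->
        rabbit_exchange:assert_args_equivalence(X, Args).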
--- src/rabbit_binding.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 0fb0baf3..c9106711 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -102,11 +102,11 @@ recover(XNames, QNames) -> #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - mnesia:read({rabbit_durable_route, B}) =/= [] andalso - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) andalso + mnesia:read({rabbit_durable_route, B}) =/= [] end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), -- cgit v1.2.1 From a10d90887efdd9eb0f8a588a8a8c94b15d1eb0aa Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 12:55:42 +0100 Subject: cosmetic --- src/rabbit_binding.erl | 2 +- src/rabbit_exchange.erl | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c9106711..ca7be59a 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -109,11 +109,11 @@ recover(XNames, QNames) -> mnesia:read({rabbit_durable_route, B}) =/= [] end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> - {ok, X} = rabbit_exchange:lookup(Src), case Tx of true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok end, + {ok, X} = rabbit_exchange:lookup(Src), rabbit_exchange:callback(X, add_binding, [Tx, X, B]) end, rabbit_durable_route), diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a74f9d28..42111773 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,12 +87,12 @@ recover() -> fun (#exchange{name = XName}) -> mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (X, Tx) -> case Tx of - true -> ok = mnesia:write(rabbit_exchange, - X, write); - false -> ok - end, - rabbit_exchange:callback(X, create, [Tx, X]) + fun (X, Tx) -> + case Tx of + true -> ok = mnesia:write(rabbit_exchange, X, write); + false -> ok + end, + rabbit_exchange:callback(X, create, [Tx, X]) end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. -- cgit v1.2.1 From ab9625d32692ff221a449d3952a911e840ffa944 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 18:10:19 +0100 Subject: Don't cons inside the tx, prevents us from copying the accumulator on the way into the worker pool at great cost. --- src/rabbit_misc.erl | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 85e08615..814a5bbc 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -467,17 +467,20 @@ map_in_order(F, L) -> %% We ignore entries that have been modified or removed. 
table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> false; - _ -> Pred(E) - end - end, - fun (false, _Tx) -> Acc; - (true, Tx) -> PrePostCommitFun(E, Tx), - [E | Acc] - end) + fun (E, Acc) -> case execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> false; + _ -> Pred(E) + end + end, + fun (false, _Tx) -> false; + (true, Tx) -> PrePostCommitFun(E, Tx), + true + end) of + false -> Acc; + true -> [E | Acc] + end end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From c8d1a130a5bb48c30e65399ad416b37a27e53afd Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 18:33:44 +0100 Subject: shrink --- src/rabbit_misc.erl | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 814a5bbc..87181c24 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -467,20 +467,16 @@ map_in_order(F, L) -> %% We ignore entries that have been modified or removed. table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> case execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> false; - _ -> Pred(E) - end - end, - fun (false, _Tx) -> false; - (true, Tx) -> PrePostCommitFun(E, Tx), - true - end) of - false -> Acc; - true -> [E | Acc] - end + fun (E, Acc) -> + case execute_mnesia_transaction( + fun () -> mnesia:match_object(TableName, E, read) =/= [] + andalso Pred(E) end, + fun (false, _Tx) -> false; + (true, Tx) -> PrePostCommitFun(E, Tx), true + end) of + false -> Acc; + true -> [E | Acc] + end end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From 98a8472c6f52abb6dcd198ed07a395d337cf35fa Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 20:08:45 +0100 Subject: remove duplicate check rabbit_misc:table_filter already filters out elements which have disappeared. --- src/rabbit_binding.erl | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index ca7be59a..7d13ea29 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -98,15 +98,12 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), rabbit_misc:table_filter( - fun (#route{binding = B = #binding{destination = Dst = - #resource{kind = Kind}}}) -> - %% The check against rabbit_durable_route is in case it - %% disappeared between getting the list and here + fun (#route{binding = #binding{destination = Dst = + #resource{kind = Kind}}}) -> sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet - end) andalso - mnesia:read({rabbit_durable_route, B}) =/= [] + end) end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> case Tx of -- cgit v1.2.1 From de9c5b5bd077da91ab3dd09b1654e4d0bd650452 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 20:09:17 +0100 Subject: cosmetic: update comment on table_filter --- src/rabbit_misc.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 87181c24..cec10ff6 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -460,9 +460,8 @@ map_in_order(F, L) -> lists:reverse( lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)). 
-%% Fold over each entry in a table, executing the pre-post-commit function in a -%% transaction. This is often far more efficient than wrapping a tx -%% around the lot. +%% Apply a pre-post-commit function to all entries in a table that +%% satisfy a predicate, and return those entries. %% %% We ignore entries that have been modified or removed. table_filter(Pred, PrePostCommitFun, TableName) -> -- cgit v1.2.1 From 9456939f2ad57435fa19975bc552762ed722d83b Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 22:08:38 +0100 Subject: cosmetic changes and minor tweaks to rabbit_binding:{add,remove} - align 'add' and 'remove' structurally, with the isomorphic final phases extracted into helper funs - call 'read' instead of 'match_object' to check for binding presence in 'remove' - cleaner and possibly slightly more efficient --- src/rabbit_binding.erl | 64 ++++++++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7d13ea29..0fb0639a 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -124,8 +124,6 @@ exists(Binding) -> add(Binding) -> add(Binding, fun (_Src, _Dst) -> ok end). -remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). - add(Binding, InnerFun) -> binding_action( Binding, @@ -134,56 +132,46 @@ add(Binding, InnerFun) -> %% in general, we want to fail on that in preference to %% anything else case InnerFun(Src, Dst) of - ok -> add(Src, Dst, B); + ok -> case mnesia:read({rabbit_route, B}) of + [] -> add(Src, Dst, B); + [_] -> fun rabbit_misc:const_ok/1 + end; {error, _} = Err -> rabbit_misc:const(Err) end end). add(Src, Dst, B) -> - case mnesia:read({rabbit_route, B}) of - [] -> Durable = all_durable([Src, Dst]), - case (not Durable orelse - mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> ok = sync_binding(B, Durable, fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), - rabbit_event:notify_if( - not Tx, binding_created, info(B)) - end; - false -> rabbit_misc:const(not_found) - end; - [_] -> fun rabbit_misc:const_ok/1 + Durable = all_durable([Src, Dst]), + case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of + true -> ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> ok = rabbit_exchange:callback(Src, add_binding, + [Tx, Src, B]), + rabbit_event:notify_if(not Tx, binding_created, + info(B)) + end; + false -> rabbit_misc:const(not_found) end. +remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). + remove(Binding, InnerFun) -> binding_action( Binding, fun (Src, Dst, B) -> - Result = - case mnesia:match_object(rabbit_route, #route{binding = B}, - write) of - [] -> - {error, binding_not_found}; - [_] -> - case InnerFun(Src, Dst) of - ok -> - ok = sync_binding(B, all_durable([Src, Dst]), - fun mnesia:delete_object/3), - {ok, maybe_auto_delete(B#binding.source, - [B], new_deletions())}; - {error, _} = E -> - E - end - end, - case Result of - {error, _} = Err -> - rabbit_misc:const(Err); - {ok, Deletions} -> - fun (Tx) -> ok = process_deletions(Deletions, Tx) end + case mnesia:read(rabbit_route, B, write) of + [] -> rabbit_misc:const({error, binding_not_found}); + [_] -> case InnerFun(Src, Dst) of + ok -> remove(Src, Dst, B); + {error, _} = Err -> rabbit_misc:const(Err) + end end end). 
+remove(Src, Dst, B) -> + ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:delete_object/3), + Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()), + fun (Tx) -> ok = process_deletions(Deletions, Tx) end. + list(VHostPath) -> VHostResource = rabbit_misc:r(VHostPath, '_'), Route = #route{binding = #binding{source = VHostResource, -- cgit v1.2.1 From 4499806171ae66cbb08b02a4309e876ff5efc0d7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 9 Apr 2011 00:15:52 +0100 Subject: tiny refactor --- src/rabbit_binding.erl | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 0fb0639a..b0a59a0c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -281,17 +281,16 @@ sync_transient_binding(Binding, Fun) -> call_with_source_and_destination(SrcName, DstName, Fun) -> SrcTable = table_for_resource(SrcName), DstTable = table_for_resource(DstName), - ErrFun = fun (Err) -> rabbit_misc:const(Err) end, + ErrFun = fun (Err) -> rabbit_misc:const({error, Err}) end, rabbit_misc:execute_mnesia_tx_with_tail( fun () -> case {mnesia:read({SrcTable, SrcName}), mnesia:read({DstTable, DstName})} of {[Src], [Dst]} -> Fun(Src, Dst); - {[], [_] } -> ErrFun({error, source_not_found}); - {[_], [] } -> ErrFun({error, destination_not_found}); - {[], [] } -> ErrFun({error, - source_and_destination_not_found}) - end + {[], [_] } -> ErrFun(source_not_found); + {[_], [] } -> ErrFun(destination_not_found); + {[], [] } -> ErrFun(source_and_destination_not_found) + end end). table_for_resource(#resource{kind = exchange}) -> rabbit_exchange; -- cgit v1.2.1 From 17010d0bd4e1db3a2f82916291e793e46ee3f5bf Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 9 Apr 2011 00:38:18 +0100 Subject: correct error when attempting to stomp on an unavailable binding ...and fix the specs too plus some cosmetic shuffling --- src/rabbit_binding.erl | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b0a59a0c..c2c8dc1f 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/2, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/2, exists/1, add/1, add/2, remove/1, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -38,25 +38,24 @@ -type(bind_errors() :: rabbit_types:error('source_not_found' | 'destination_not_found' | 'source_and_destination_not_found')). --type(bind_res() :: 'ok' | bind_errors()). +-type(bind_ok_or_error() :: 'ok' | bind_errors() | + rabbit_types:error('binding_not_found')). +-type(bind_res() :: bind_ok_or_error() | rabbit_misc:const(bind_ok_or_error())). -type(inner_fun() :: fun((rabbit_types:exchange(), rabbit_types:exchange() | rabbit_types:amqqueue()) -> rabbit_types:ok_or_error(rabbit_types:amqp_error()))). -type(bindings() :: [rabbit_types:binding()]). --type(add_res() :: bind_res() | rabbit_misc:const(bind_res())). --type(bind_or_error() :: bind_res() | rabbit_types:error('binding_not_found')). --type(remove_res() :: bind_or_error() | rabbit_misc:const(bind_or_error())). -opaque(deletions() :: dict()). -spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). 
--spec(add/1 :: (rabbit_types:binding()) -> add_res()). --spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). --spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> add_res()). --spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> remove_res()). +-spec(add/1 :: (rabbit_types:binding()) -> bind_res()). +-spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). +-spec(remove/1 :: (rabbit_types:binding()) -> bind_res()). +-spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). -spec(list/1 :: (rabbit_types:vhost()) -> bindings()). -spec(list_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). @@ -149,7 +148,7 @@ add(Src, Dst, B) -> rabbit_event:notify_if(not Tx, binding_created, info(B)) end; - false -> rabbit_misc:const(not_found) + false -> rabbit_misc:const({error, binding_not_found}) end. remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). -- cgit v1.2.1
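One convention runs through all of the rabbit_binding changes above: the body passed to execute_mnesia_tx_with_tail returns a fun of one boolean argument, and, as the rabbit_misc comment quoted earlier puts it, that fun is called immediately before and after the transaction commits, with const-style helpers standing in when nothing remains to be done. The sketch below is a generic rendering of that shape built on plain mnesia:transaction/1; with_tail/1 and const/1 are invented names, not the rabbit_misc API.

    %% "Transaction with a tail": the body runs inside the transaction and
    %% returns a continuation which is invoked with true while still inside
    %% the transaction and with false once it has committed.
    -module(tx_tail_sketch).
    -export([with_tail/1, const/1]).

    const(X) -> fun (_) -> X end.                 %% a tail with nothing to do

    with_tail(TxFun) ->
        {atomic, {TailFun, _PreResult}} =
            mnesia:transaction(
              fun () ->
                      Tail = TxFun(),
                      {Tail, Tail(true)}          %% pre-commit, inside the tx
              end),
        TailFun(false).                           %% post-commit, outside the tx

In that scheme a caller with nothing left to do after commit simply returns const(ok), which appears to be the role fun rabbit_misc:const_ok/1 plays in the hunks above.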