From e32010b15399834977c17614f55d8f660c4c3971 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Thu, 9 Apr 2009 16:49:42 -0700 Subject: Initial solution. Remaining: documentation; rabbitmqctl interface. --- src/rabbit_amqqueue_process.erl | 1 + src/rabbit_channel.erl | 1 + src/rabbit_log.erl | 52 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index c390b2b7..990a2545 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -161,6 +161,7 @@ deliver_immediately(Message, Delivered, round_robin = RoundRobin, next_msg_id = NextId}) -> ?LOGDEBUG("AMQQUEUE ~p DELIVERY:~n~p~n", [QName, Message]), + rabbit_log:tap_trace_out(Message, QName), case queue:out(RoundRobin) of {{value, QEntry = {ChPid, #consumer{tag = ConsumerTag, ack_required = AckRequired}}}, diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 7574cd67..c074fe09 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -765,6 +765,7 @@ binding_action(Fun, ExchangeNameBin, QueueNameBin, RoutingKey, Arguments, publish(Mandatory, Immediate, Message, QPids, State = #ch{transaction_id = TxnKey, writer_pid = WriterPid}) -> + rabbit_log:tap_trace_in(Message, QPids), Handled = deliver(QPids, Mandatory, Immediate, TxnKey, Message, WriterPid), case TxnKey of diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index f408336e..5d1a8f60 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -41,9 +41,13 @@ -export([debug/1, debug/2, message/4, info/1, info/2, warning/1, warning/2, error/1, error/2]). +-export([tap_trace_in/2, tap_trace_out/2]). + -import(io). -import(error_logger). +-include("rabbit.hrl"). + -define(SERVER, ?MODULE). %%---------------------------------------------------------------------------- @@ -95,6 +99,54 @@ error(Fmt) -> error(Fmt, Args) when is_list(Args) -> gen_server:cast(?SERVER, {error, Fmt, Args}). +tap_trace_in(Message = #basic_message{exchange_name = XName}, + QPids) -> + case application:get_env(trace_exchange) of + undefined -> + ok; + {ok, TraceExchangeBin} -> + QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], + QNames = [N || [{name, #resource{name = N}}] <- QInfos], + maybe_inject(TraceExchangeBin, + XName, + <<"publish">>, + XName, + [{queue_names, QNames}, + {message, Message}]) + end. + +tap_trace_out(Message = #basic_message{exchange_name = XName}, + QName) -> + case application:get_env(trace_exchange) of + undefined -> + ok; + {ok, TraceExchangeBin} -> + maybe_inject(TraceExchangeBin, + XName, + <<"deliver">>, + QName, + [{message, Message}]) + end. + +maybe_inject(TraceExchangeBin, + #resource{virtual_host = VHostBin, name = OriginalExchangeBin}, + RKPrefix, + #resource{name = RKSuffix}, + Term) -> + if + TraceExchangeBin =:= OriginalExchangeBin -> + ok; + true -> + rabbit_exchange:simple_publish( + false, + false, + rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), + <>, + <<"text/plain">>, + list_to_binary(io_lib:format("~p", [Term]))), + ok + end. + %%-------------------------------------------------------------------- init([]) -> {ok, none}. 
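%% Illustrative sketch, not part of the patch above: the tap is keyed off the
%% rabbit application's trace_exchange environment variable, and maybe_inject/5
%% deliberately skips messages whose own exchange is the trace exchange, so
%% traced traffic is not traced again. Assuming an exchange named <<"my.trace">>
%% has been declared for collecting trace messages (the name is an arbitrary
%% example), switching the tap on and off from an attached Erlang shell could
%% look like:

enable_example_tap() ->
    ok = application:set_env(rabbit, trace_exchange, <<"my.trace">>).

disable_example_tap() ->
    ok = application:unset_env(rabbit, trace_exchange).
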
-- cgit v1.2.1 From a0aaa46e1611815ef7784f05684a4b1ee2084d33 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Thu, 9 Apr 2009 17:16:57 -0700 Subject: rabbitmqctl tap control --- src/rabbit_control.erl | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index e6717d68..24765b3a 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -124,6 +124,10 @@ Available commands: list_bindings [-p ] list_connections [ ...] + enable_tap + query_tap + disable_tap + Quiet output mode is selected with the \"-q\" flag. Informational messages are suppressed when quiet mode is in effect. @@ -263,6 +267,18 @@ action(list_connections, Node, Args, Inform) -> [ArgAtoms]), ArgAtoms); +action(enable_tap, Node, [ExchangeName], Inform) -> + Inform("Enabling tap to exchange ~p", [ExchangeName]), + rpc_call(Node, application, set_env, [rabbit, trace_exchange, list_to_binary(ExchangeName)]); + +action(query_tap, Node, [], Inform) -> + Inform("Querying tap", []), + io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, trace_exchange])]); + +action(disable_tap, Node, [], Inform) -> + Inform("Disabling tap", []), + rpc_call(Node, application, unset_env, [rabbit, trace_exchange]); + action(Command, Node, Args, Inform) -> {VHost, RemainingArgs} = parse_vhost_flag(Args), action(Command, Node, VHost, RemainingArgs, Inform). -- cgit v1.2.1 From 5681901fae2f100384aef5ac70630372f12d207f Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Thu, 9 Apr 2009 18:41:49 -0700 Subject: Use AMQP table encoding for the data. --- src/rabbit_log.erl | 103 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 78 insertions(+), 25 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 5d1a8f60..aa22ffe3 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -47,6 +47,7 @@ -import(error_logger). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -define(SERVER, ?MODULE). @@ -101,38 +102,51 @@ error(Fmt, Args) when is_list(Args) -> tap_trace_in(Message = #basic_message{exchange_name = XName}, QPids) -> - case application:get_env(trace_exchange) of - undefined -> - ok; - {ok, TraceExchangeBin} -> - QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], - QNames = [N || [{name, #resource{name = N}}] <- QInfos], - maybe_inject(TraceExchangeBin, - XName, - <<"publish">>, - XName, - [{queue_names, QNames}, - {message, Message}]) - end. + check_trace(fun (TraceExchangeBin) -> + QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], + QNames = [N || [{name, #resource{name = N}}] <- QInfos], + maybe_inject(TraceExchangeBin, + XName, + <<"publish">>, + XName, + [{<<"queue_names">>, + longstr, + list_to_binary(rabbit_misc:intersperse(",", QNames))}, + {<<"message">>, + table, + message_to_table(Message)}]) + end). tap_trace_out(Message = #basic_message{exchange_name = XName}, QName) -> - case application:get_env(trace_exchange) of - undefined -> - ok; - {ok, TraceExchangeBin} -> - maybe_inject(TraceExchangeBin, - XName, - <<"deliver">>, - QName, - [{message, Message}]) + check_trace(fun (TraceExchangeBin) -> + maybe_inject(TraceExchangeBin, + XName, + <<"deliver">>, + QName, + [{<<"message">>, + table, + message_to_table(Message)}]) + end). 
+ +check_trace(F) -> + case catch case application:get_env(trace_exchange) of + undefined -> + ok; + {ok, TraceExchangeBin} -> + F(TraceExchangeBin) + end of + {'EXIT', Reason} -> + info("Trace tap died with reason ~p~n", [Reason]); + ok -> + ok end. maybe_inject(TraceExchangeBin, #resource{virtual_host = VHostBin, name = OriginalExchangeBin}, RKPrefix, #resource{name = RKSuffix}, - Term) -> + Table) -> if TraceExchangeBin =:= OriginalExchangeBin -> ok; @@ -142,11 +156,50 @@ maybe_inject(TraceExchangeBin, false, rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), <>, - <<"text/plain">>, - list_to_binary(io_lib:format("~p", [Term]))), + <<"application/x-amqp-table; version=0-8">>, + rabbit_binary_generator:generate_table(Table)), ok end. +message_to_table(#basic_message{exchange_name = #resource{name = XName}, + routing_key = RoutingKey, + content = Content}) -> + #content{properties = Props, + payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), + #'P_basic'{content_type = ContentType, + content_encoding = ContentEncoding, + headers = Headers, + delivery_mode = DeliveryMode, + priority = Priority, + correlation_id = CorrelationId, + reply_to = ReplyTo, + expiration = Expiration, + message_id = MessageId, + timestamp = Timestamp, + type = Type, + user_id = UserId, + app_id = AppId} = Props, + [{<<"exchange_name">>, longstr, XName}, + {<<"routing_key">>, longstr, RoutingKey}, + {<<"headers">>, table, prune_undefined([{<<"content_type">>, longstr, ContentType}, + {<<"content_encoding">>, longstr, ContentEncoding}, + {<<"headers">>, table, Headers}, + {<<"delivery_mode">>, signedint, DeliveryMode}, + {<<"priority">>, signedint, Priority}, + {<<"correlation_id">>, longstr, CorrelationId}, + {<<"reply_to">>, longstr, ReplyTo}, + {<<"expiration">>, longstr, Expiration}, + {<<"message_id">>, longstr, MessageId}, + {<<"timestamp">>, longstr, Timestamp}, + {<<"type">>, longstr, Type}, + {<<"user_id">>, longstr, UserId}, + {<<"app_id">>, longstr, AppId}])}, + {<<"body">>, binary, list_to_binary(lists:reverse(PFR))}]. + +prune_undefined(Fields) -> + [F || F = {_, _, Value} <- Fields, + Value =/= undefined]. + %%-------------------------------------------------------------------- init([]) -> {ok, none}. -- cgit v1.2.1 From 2e7f226e12124036ab3283e5ab0f1870acf308a3 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Thu, 9 Apr 2009 18:44:57 -0700 Subject: Use longstr instead of (nonstandard) binary --- src/rabbit_log.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index aa22ffe3..c5ddc6ac 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -194,7 +194,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, {<<"type">>, longstr, Type}, {<<"user_id">>, longstr, UserId}, {<<"app_id">>, longstr, AppId}])}, - {<<"body">>, binary, list_to_binary(lists:reverse(PFR))}]. + {<<"body">>, longstr, list_to_binary(lists:reverse(PFR))}]. 
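%% Sketch of the consuming side, with assumptions flagged: the trace body above
%% is produced by rabbit_binary_generator:generate_table/1 and advertised as
%% "application/x-amqp-table; version=0-8". Within this code base the natural
%% inverse would be rabbit_binary_parser:parse_table/1; whether that helper is
%% available where the trace consumer runs is an assumption, not something this
%% patch establishes.

decode_trace_body_example(BodyBin) ->
    %% Expected to yield {Name, Type, Value} entries mirroring the table built
    %% in message_to_table, e.g. a {<<"routing_key">>, longstr, _} entry.
    rabbit_binary_parser:parse_table(BodyBin).
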
prune_undefined(Fields) -> [F || F = {_, _, Value} <- Fields, -- cgit v1.2.1 From 17717404f14d772244dbd988f7e2aca783d13335 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Fri, 10 Apr 2009 15:06:44 -0700 Subject: Generalize rabbitmqctl to set_env/get_env/unset_env --- src/rabbit_control.erl | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 24765b3a..941c5999 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -124,9 +124,9 @@ Available commands: list_bindings [-p ] list_connections [ ...] - enable_tap - query_tap - disable_tap + set_env + get_env + unset_env Quiet output mode is selected with the \"-q\" flag. Informational messages are suppressed when quiet mode is in effect. @@ -267,17 +267,19 @@ action(list_connections, Node, Args, Inform) -> [ArgAtoms]), ArgAtoms); -action(enable_tap, Node, [ExchangeName], Inform) -> - Inform("Enabling tap to exchange ~p", [ExchangeName]), - rpc_call(Node, application, set_env, [rabbit, trace_exchange, list_to_binary(ExchangeName)]); +action(set_env, Node, [VariableName, ErlangTermStr], Inform) -> + Inform("Setting variable ~p for node ~p to ~s", [VariableName, Node, ErlangTermStr]), + {ok, Tokens, _} = erl_scan:string(ErlangTermStr ++ "."), + {ok, Term} = erl_parse:parse_term(Tokens), + rpc_call(Node, application, set_env, [rabbit, list_to_atom(VariableName), Term]); -action(query_tap, Node, [], Inform) -> - Inform("Querying tap", []), - io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, trace_exchange])]); +action(get_env, Node, [VariableName], Inform) -> + Inform("Getting variable ~p for node ~p", [VariableName, Node]), + io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, list_to_atom(VariableName)])]); -action(disable_tap, Node, [], Inform) -> - Inform("Disabling tap", []), - rpc_call(Node, application, unset_env, [rabbit, trace_exchange]); +action(unset_env, Node, [VariableName], Inform) -> + Inform("Clearing variable ~p for node ~p", [VariableName, Node]), + rpc_call(Node, application, unset_env, [rabbit, list_to_atom(VariableName)]); action(Command, Node, Args, Inform) -> {VHost, RemainingArgs} = parse_vhost_flag(Args), -- cgit v1.2.1 From 85a3e92eb4a361dbb5229f40de993fba3064261b Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Fri, 10 Apr 2009 19:03:47 -0700 Subject: Switch to two-arg version of get_env for performance improvement --- src/rabbit_log.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index c5ddc6ac..0cf44c8d 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -130,7 +130,7 @@ tap_trace_out(Message = #basic_message{exchange_name = XName}, end). 
check_trace(F) -> - case catch case application:get_env(trace_exchange) of + case catch case application:get_env(rabbit, trace_exchange) of undefined -> ok; {ok, TraceExchangeBin} -> -- cgit v1.2.1 From 0adc9afd532f879643ca3fd1d509dbf9df1ab523 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Sat, 11 Apr 2009 09:35:03 -0700 Subject: Trace activity on a per-vhost basis --- src/rabbit_control.erl | 31 +++++++++++++++++-------------- src/rabbit_log.erl | 33 ++++++++++++++++++++------------- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 941c5999..d0fb8da4 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -124,9 +124,9 @@ Available commands: list_bindings [-p ] list_connections [ ...] - set_env - get_env - unset_env + set_env + get_env + unset_env Quiet output mode is selected with the \"-q\" flag. Informational messages are suppressed when quiet mode is in effect. @@ -267,19 +267,17 @@ action(list_connections, Node, Args, Inform) -> [ArgAtoms]), ArgAtoms); -action(set_env, Node, [VariableName, ErlangTermStr], Inform) -> - Inform("Setting variable ~p for node ~p to ~s", [VariableName, Node, ErlangTermStr]), - {ok, Tokens, _} = erl_scan:string(ErlangTermStr ++ "."), - {ok, Term} = erl_parse:parse_term(Tokens), - rpc_call(Node, application, set_env, [rabbit, list_to_atom(VariableName), Term]); +action(set_env, Node, [VarStr, TermStr], Inform) -> + Inform("Setting control variable ~s for node ~p to ~s", [VarStr, Node, TermStr]), + rpc_call(Node, application, set_env, [rabbit, parse_term(VarStr), parse_term(TermStr)]); -action(get_env, Node, [VariableName], Inform) -> - Inform("Getting variable ~p for node ~p", [VariableName, Node]), - io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, list_to_atom(VariableName)])]); +action(get_env, Node, [VarStr], Inform) -> + Inform("Getting control variable ~s for node ~p", [VarStr, Node]), + io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, parse_term(VarStr)])]); -action(unset_env, Node, [VariableName], Inform) -> - Inform("Clearing variable ~p for node ~p", [VariableName, Node]), - rpc_call(Node, application, unset_env, [rabbit, list_to_atom(VariableName)]); +action(unset_env, Node, [VarStr], Inform) -> + Inform("Clearing control variable ~s for node ~p", [VarStr, Node]), + rpc_call(Node, application, unset_env, [rabbit, parse_term(VarStr)]); action(Command, Node, Args, Inform) -> {VHost, RemainingArgs} = parse_vhost_flag(Args), @@ -318,6 +316,11 @@ default_if_empty(List, Default) when is_list(List) -> [list_to_atom(X) || X <- List] end. +parse_term(Str) -> + {ok, Tokens, _} = erl_scan:string(Str ++ "."), + {ok, Term} = erl_parse:parse_term(Tokens), + Term. + display_info_list(Results, InfoItemKeys) when is_list(Results) -> lists:foreach(fun (Result) -> display_row([format_info_item(Result, X) || X <- InfoItemKeys]) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 0cf44c8d..b581d1ee 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -100,15 +100,18 @@ error(Fmt) -> error(Fmt, Args) when is_list(Args) -> gen_server:cast(?SERVER, {error, Fmt, Args}). 
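%% For orientation: parse_term/1, added to rabbit_control above, turns the
%% string arguments of set_env/get_env/unset_env into Erlang terms using the
%% standard scanner and parser. A rough standalone equivalent, with purely
%% illustrative inputs:

parse_term_example() ->
    {ok, Tokens1, _} = erl_scan:string("500" ++ "."),
    {ok, 500} = erl_parse:parse_term(Tokens1),
    {ok, Tokens2, _} = erl_scan:string("{foo, bar}" ++ "."),
    {ok, {foo, bar}} = erl_parse:parse_term(Tokens2),
    ok.
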
-tap_trace_in(Message = #basic_message{exchange_name = XName}, +tap_trace_in(Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}, QPids) -> - check_trace(fun (TraceExchangeBin) -> + check_trace(VHostBin, + fun (TraceExchangeBin) -> QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], QNames = [N || [{name, #resource{name = N}}] <- QInfos], maybe_inject(TraceExchangeBin, - XName, + VHostBin, + XNameBin, <<"publish">>, - XName, + XNameBin, [{<<"queue_names">>, longstr, list_to_binary(rabbit_misc:intersperse(",", QNames))}, @@ -117,20 +120,23 @@ tap_trace_in(Message = #basic_message{exchange_name = XName}, message_to_table(Message)}]) end). -tap_trace_out(Message = #basic_message{exchange_name = XName}, - QName) -> - check_trace(fun (TraceExchangeBin) -> +tap_trace_out(Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}, + #resource{name = QNameBin}) -> + check_trace(VHostBin, + fun (TraceExchangeBin) -> maybe_inject(TraceExchangeBin, - XName, + VHostBin, + XNameBin, <<"deliver">>, - QName, + QNameBin, [{<<"message">>, table, message_to_table(Message)}]) end). -check_trace(F) -> - case catch case application:get_env(rabbit, trace_exchange) of +check_trace(VHostBin, F) -> + case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of undefined -> ok; {ok, TraceExchangeBin} -> @@ -143,9 +149,10 @@ check_trace(F) -> end. maybe_inject(TraceExchangeBin, - #resource{virtual_host = VHostBin, name = OriginalExchangeBin}, + VHostBin, + OriginalExchangeBin, RKPrefix, - #resource{name = RKSuffix}, + RKSuffix, Table) -> if TraceExchangeBin =:= OriginalExchangeBin -> -- cgit v1.2.1 From 519a16ec69f5c1a6f52f336a0a716971df144a25 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Sat, 11 Apr 2009 16:24:15 -0700 Subject: Document rabbitmqctl changes in the pod --- docs/rabbitmqctl.1.pod | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/rabbitmqctl.1.pod b/docs/rabbitmqctl.1.pod index a0232a40..8a29b928 100644 --- a/docs/rabbitmqctl.1.pod +++ b/docs/rabbitmqctl.1.pod @@ -282,6 +282,18 @@ optional virtual host parameter for which to display results, defaulting to I<"/">. The default can be overridden with the B<-p> flag. Result columns for these commands and list_connections are tab-separated. +=head2 CONTROL VARIABLES + +set_env I I + set the value of control variable I to I. + +get_env I + get the value of control variable I, printing either + {ok,I} or undefined. + +unset_env I + clear control variable I. 
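%% With this change the environment key becomes the pair {trace_exchange, VHost},
%% so the tap is enabled per virtual host. An illustrative way to trace only the
%% default virtual host (the exchange name is an arbitrary example):

enable_vhost_tap_example() ->
    ok = application:set_env(rabbit, {trace_exchange, <<"/">>}, <<"my.trace">>).
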
+ =head1 EXAMPLES Create a user named foo with (initial) password bar at the Erlang node -- cgit v1.2.1 From 09450cdccb23baaf936a37c045b9875a35fb3fc8 Mon Sep 17 00:00:00 2001 From: Tony Garnock-Jones Date: Sat, 9 May 2009 12:03:22 +0100 Subject: Move outbound tap to saner location; take advantage of extra info at that point --- src/rabbit_amqqueue_process.erl | 1 - src/rabbit_channel.erl | 10 ++++++---- src/rabbit_log.erl | 24 +++++++++++++++++------- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 990a2545..c390b2b7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -161,7 +161,6 @@ deliver_immediately(Message, Delivered, round_robin = RoundRobin, next_msg_id = NextId}) -> ?LOGDEBUG("AMQQUEUE ~p DELIVERY:~n~p~n", [QName, Message]), - rabbit_log:tap_trace_out(Message, QName), case queue:out(RoundRobin) of {{value, QEntry = {ChPid, #consumer{tag = ConsumerTag, ack_required = AckRequired}}}, diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index bcbf2810..7c1dcf02 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -362,6 +362,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, routing_key = RoutingKey, content = Content}}} -> State1 = lock_message(not(NoAck), {DeliveryTag, none, Msg}, State), + rabbit_log:tap_trace_out(Msg, DeliveryTag, none), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, @@ -937,15 +938,16 @@ lock_message(false, _MsgStruct, State) -> State. internal_deliver(WriterPid, Notify, ConsumerTag, DeliveryTag, - {_QName, QPid, _MsgId, Redelivered, - #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, - content = Content}}) -> + Msg = {_QName, QPid, _MsgId, Redelivered, + #basic_message{exchange_name = ExchangeName, + routing_key = RoutingKey, + content = Content}}) -> M = #'basic.deliver'{consumer_tag = ConsumerTag, delivery_tag = DeliveryTag, redelivered = Redelivered, exchange = ExchangeName#resource.name, routing_key = RoutingKey}, + rabbit_log:tap_trace_out(Msg, DeliveryTag, ConsumerTag), ok = case Notify of true -> rabbit_writer:send_command_and_notify( WriterPid, QPid, self(), M, Content); diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index b581d1ee..93c01400 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -41,7 +41,7 @@ -export([debug/1, debug/2, message/4, info/1, info/2, warning/1, warning/2, error/1, error/2]). --export([tap_trace_in/2, tap_trace_out/2]). +-export([tap_trace_in/2, tap_trace_out/3]). -import(io). -import(error_logger). @@ -120,19 +120,29 @@ tap_trace_in(Message = #basic_message{exchange_name = #resource{virtual_host = V message_to_table(Message)}]) end). 
-tap_trace_out(Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}, - #resource{name = QNameBin}) -> +tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, + Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}}, + DeliveryTag, + ConsumerTagOrNone) -> check_trace(VHostBin, fun (TraceExchangeBin) -> + RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later + {<<"queue_msg_number">>, signedint, QMsgId}, + {<<"redelivered">>, signedint, RedeliveredNum}, + {<<"message">>, table, message_to_table(Message)}], + Fields = case ConsumerTagOrNone of + none -> Fields0; + ConsumerTag -> [{<<"consumer_tag">>, longstr, ConsumerTag} + | Fields0] + end, maybe_inject(TraceExchangeBin, VHostBin, XNameBin, <<"deliver">>, QNameBin, - [{<<"message">>, - table, - message_to_table(Message)}]) + Fields) end). check_trace(VHostBin, F) -> -- cgit v1.2.1 From 18443fcb146e270af9116bb6deff56068746c384 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 9 May 2009 23:26:42 +0100 Subject: cosmetic --- src/rabbit_log.erl | 159 +++++++++++++++++++++++++---------------------------- 1 file changed, 76 insertions(+), 83 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 93c01400..87cda552 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -100,70 +100,61 @@ error(Fmt) -> error(Fmt, Args) when is_list(Args) -> gen_server:cast(?SERVER, {error, Fmt, Args}). -tap_trace_in(Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}, +tap_trace_in(Message = #basic_message{exchange_name = #resource{ + virtual_host = VHostBin, + name = XNameBin}}, QPids) -> - check_trace(VHostBin, - fun (TraceExchangeBin) -> - QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], - QNames = [N || [{name, #resource{name = N}}] <- QInfos], - maybe_inject(TraceExchangeBin, - VHostBin, - XNameBin, - <<"publish">>, - XNameBin, - [{<<"queue_names">>, - longstr, - list_to_binary(rabbit_misc:intersperse(",", QNames))}, - {<<"message">>, - table, - message_to_table(Message)}]) - end). + check_trace( + VHostBin, + fun (TraceExchangeBin) -> + QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || + P <- QPids], + QNames = [N || [{name, #resource{name = N}}] <- QInfos], + QNamesStr = list_to_binary(rabbit_misc:intersperse(",", QNames)), + EncodedMessage = message_to_table(Message), + maybe_inject(TraceExchangeBin, VHostBin, XNameBin, + <<"publish">>, XNameBin, + [{<<"queue_names">>, longstr, QNamesStr}, + {<<"message">>, table, EncodedMessage}]) + end). 
tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, - Message = #basic_message{exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}}, + Message = #basic_message{exchange_name = #resource{ + virtual_host = VHostBin, + name = XNameBin}}}, DeliveryTag, ConsumerTagOrNone) -> - check_trace(VHostBin, - fun (TraceExchangeBin) -> - RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later - {<<"queue_msg_number">>, signedint, QMsgId}, - {<<"redelivered">>, signedint, RedeliveredNum}, - {<<"message">>, table, message_to_table(Message)}], - Fields = case ConsumerTagOrNone of - none -> Fields0; - ConsumerTag -> [{<<"consumer_tag">>, longstr, ConsumerTag} - | Fields0] - end, - maybe_inject(TraceExchangeBin, - VHostBin, - XNameBin, - <<"deliver">>, - QNameBin, - Fields) - end). + check_trace( + VHostBin, + fun (TraceExchangeBin) -> + RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + EncodedMessage = message_to_table(Message), + Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later + {<<"queue_msg_number">>, signedint, QMsgId}, + {<<"redelivered">>, signedint, RedeliveredNum}, + {<<"message">>, table, EncodedMessage}], + Fields = case ConsumerTagOrNone of + none -> + Fields0; + ConsumerTag -> + [{<<"consumer_tag">>, longstr, ConsumerTag} + | Fields0] + end, + maybe_inject(TraceExchangeBin, VHostBin, XNameBin, + <<"deliver">>, QNameBin, Fields) + end). check_trace(VHostBin, F) -> case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of - undefined -> - ok; - {ok, TraceExchangeBin} -> - F(TraceExchangeBin) + undefined -> ok; + {ok, TraceExchangeBin} -> F(TraceExchangeBin) end of - {'EXIT', Reason} -> - info("Trace tap died with reason ~p~n", [Reason]); - ok -> - ok + {'EXIT', Reason} -> info("Trace tap died with reason ~p~n", [Reason]); + ok -> ok end. 
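%% For orientation (placeholder values, not output captured from a broker): a
%% delivery-side trace message injected below is routed with the <<"deliver">>
%% prefix followed by the queue name, and carries a table along these lines.
%% The consumer_tag entry is only present for basic.deliver; the basic.get path
%% passes ConsumerTagOrNone = none.
%%
%%   [{<<"consumer_tag">>,     longstr,   <<"example-ctag">>},
%%    {<<"delivery_tag">>,     signedint, 1},
%%    {<<"queue_msg_number">>, signedint, 42},
%%    {<<"redelivered">>,      signedint, 0},
%%    {<<"message">>,          table,     message_to_table(Message)}]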
-maybe_inject(TraceExchangeBin, - VHostBin, - OriginalExchangeBin, - RKPrefix, - RKSuffix, - Table) -> +maybe_inject(TraceExchangeBin, VHostBin, OriginalExchangeBin, + RKPrefix, RKSuffix, Table) -> if TraceExchangeBin =:= OriginalExchangeBin -> ok; @@ -181,37 +172,39 @@ maybe_inject(TraceExchangeBin, message_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_key = RoutingKey, content = Content}) -> - #content{properties = Props, - payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), - #'P_basic'{content_type = ContentType, - content_encoding = ContentEncoding, - headers = Headers, - delivery_mode = DeliveryMode, - priority = Priority, - correlation_id = CorrelationId, - reply_to = ReplyTo, - expiration = Expiration, - message_id = MessageId, - timestamp = Timestamp, - type = Type, - user_id = UserId, - app_id = AppId} = Props, + #content{properties = #'P_basic'{content_type = ContentType, + content_encoding = ContentEncoding, + headers = Headers, + delivery_mode = DeliveryMode, + priority = Priority, + correlation_id = CorrelationId, + reply_to = ReplyTo, + expiration = Expiration, + message_id = MessageId, + timestamp = Timestamp, + type = Type, + user_id = UserId, + app_id = AppId}, + payload_fragments_rev = PFR} = + rabbit_binary_parser:ensure_content_decoded(Content), + Headers = prune_undefined( + [{<<"content_type">>, longstr, ContentType}, + {<<"content_encoding">>, longstr, ContentEncoding}, + {<<"headers">>, table, Headers}, + {<<"delivery_mode">>, signedint, DeliveryMode}, + {<<"priority">>, signedint, Priority}, + {<<"correlation_id">>, longstr, CorrelationId}, + {<<"reply_to">>, longstr, ReplyTo}, + {<<"expiration">>, longstr, Expiration}, + {<<"message_id">>, longstr, MessageId}, + {<<"timestamp">>, longstr, Timestamp}, + {<<"type">>, longstr, Type}, + {<<"user_id">>, longstr, UserId}, + {<<"app_id">>, longstr, AppId}]), [{<<"exchange_name">>, longstr, XName}, - {<<"routing_key">>, longstr, RoutingKey}, - {<<"headers">>, table, prune_undefined([{<<"content_type">>, longstr, ContentType}, - {<<"content_encoding">>, longstr, ContentEncoding}, - {<<"headers">>, table, Headers}, - {<<"delivery_mode">>, signedint, DeliveryMode}, - {<<"priority">>, signedint, Priority}, - {<<"correlation_id">>, longstr, CorrelationId}, - {<<"reply_to">>, longstr, ReplyTo}, - {<<"expiration">>, longstr, Expiration}, - {<<"message_id">>, longstr, MessageId}, - {<<"timestamp">>, longstr, Timestamp}, - {<<"type">>, longstr, Type}, - {<<"user_id">>, longstr, UserId}, - {<<"app_id">>, longstr, AppId}])}, - {<<"body">>, longstr, list_to_binary(lists:reverse(PFR))}]. + {<<"routing_key">>, longstr, RoutingKey}, + {<<"headers">>, table, Headers}, + {<<"body">>, longstr, list_to_binary(lists:reverse(PFR))}]. 
prune_undefined(Fields) -> [F || F = {_, _, Value} <- Fields, -- cgit v1.2.1 From 766295ae1ff8e59b61c93b345624eda3f146e9f4 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 26 Aug 2010 18:46:36 +0100 Subject: implementing topic routing with tries; adding better test for topic routing --- include/rabbit.hrl | 6 + src/rabbit_exchange_type_topic.erl | 253 +++++++++++++++++++++++++++++++------ src/rabbit_mnesia.erl | 17 +++ src/rabbit_router.erl | 6 + src/rabbit_tests.erl | 114 +++++++++++++---- 5 files changed, 334 insertions(+), 62 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index b9abd788..210709b9 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -63,6 +63,12 @@ -record(binding, {exchange_name, key, queue_name, args = []}). -record(reverse_binding, {queue_name, key, exchange_name, args = []}). +-record(topic_trie_edge, {trie_edge, node_id}). +-record(topic_trie_binding, {trie_binding, value = const}). + +-record(trie_edge, {exchange_name, node_id, word}). +-record(trie_binding, {exchange_name, node_id, queue_name}). + -record(listener, {node, protocol, host, port}). -record(basic_message, {exchange_name, routing_key, content, guid, diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index e796acf3..35f25ccb 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,6 +30,7 @@ %% -module(rabbit_exchange_type_topic). +-include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). @@ -46,59 +47,231 @@ {requires, rabbit_exchange_type_registry}, {enables, kernel_ready}]}). --export([topic_matches/2]). +-export([which_matches/2]). -ifdef(use_specs). --spec(topic_matches/2 :: (binary(), binary()) -> boolean()). +-spec(which_matches/2 :: + (rabbit_exchange:name(), rabbit_router:routing_key()) -> + [rabbit_amqqueue:name()]). -endif. +%%---------------------------------------------------------------------------- + description() -> [{name, <<"topic">>}, {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. -publish(#exchange{name = Name}, Delivery = - #delivery{message = #basic_message{routing_key = RoutingKey}}) -> - rabbit_router:deliver(rabbit_router:match_bindings( - Name, fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RoutingKey) - end), - Delivery). - -split_topic_key(Key) -> - string:tokens(binary_to_list(Key), "."). - -topic_matches(PatternKey, RoutingKey) -> - P = split_topic_key(PatternKey), - R = split_topic_key(RoutingKey), - topic_matches1(P, R). - -topic_matches1(["#"], _R) -> - true; -topic_matches1(["#" | PTail], R) -> - last_topic_match(PTail, [], lists:reverse(R)); -topic_matches1([], []) -> - true; -topic_matches1(["*" | PatRest], [_ | ValRest]) -> - topic_matches1(PatRest, ValRest); -topic_matches1([PatElement | PatRest], [ValElement | ValRest]) - when PatElement == ValElement -> - topic_matches1(PatRest, ValRest); -topic_matches1(_, _) -> - false. - -last_topic_match(P, R, []) -> - topic_matches1(P, R); -last_topic_match(P, R, [BacktrackNext | BacktrackList]) -> - topic_matches1(P, R) or - last_topic_match(P, [BacktrackNext | R], BacktrackList). +publish(#exchange{name = X}, Delivery = + #delivery{message = #basic_message{routing_key = Key}}) -> + rabbit_router:deliver_by_queue_names(which_matches(X, Key), Delivery). validate(_X) -> ok. create(_X) -> ok. recover(_X, _Bs) -> ok. -delete(_X, _Bs) -> ok. -add_binding(_X, _B) -> ok. -remove_bindings(_X, _Bs) -> ok. 
+ +delete(#exchange{name = X}, _Bs) -> + rabbit_misc:execute_mnesia_transaction(fun() -> trie_remove_all_edges(X), + trie_remove_all_bindings(X) + end), + ok. + +add_binding(_Exchange, #binding{exchange_name = X, key = K, queue_name = Q}) -> + rabbit_misc:execute_mnesia_transaction( + fun() -> FinalNode = follow_down_create(X, split_topic_key(K)), + trie_add_binding(X, FinalNode, Q) + end), + ok. + +remove_bindings(_X, Bs) -> + rabbit_misc:execute_mnesia_transaction( + fun() -> lists:foreach(fun remove_binding/1, Bs) end), + ok. + +remove_binding(#binding{exchange_name = X, key = K, queue_name = Q}) -> + Path = follow_down_get_path(X, split_topic_key(K)), + {FinalNode, _} = hd(Path), + trie_remove_binding(X, FinalNode, Q), + remove_path_if_empty(X, Path), + ok. + assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). + +%% NB: This function may return duplicate results in some situations (that's ok) +which_matches(X, Key) -> + Words = split_topic_key(Key), + mnesia:async_dirty(fun trie_match/2, [X, Words]). + +%%---------------------------------------------------------------------------- + +trie_match(X, Words) -> + trie_match(X, root, Words). +trie_match(X, Node, []) -> + FinalRes = trie_bindings(X, Node), + HashRes = case trie_child(X, Node, "#") of + {ok, HashNode} -> trie_match(X, HashNode, []); + error -> [] + end, + FinalRes ++ HashRes; +trie_match(X, Node, [W | RestW] = Words) -> + ExactRes = case trie_child(X, Node, W) of + {ok, NextNode} -> trie_match(X, NextNode, RestW); + error -> [] + end, + StarRes = case trie_child(X, Node, "*") of + {ok, StarNode} -> trie_match(X, StarNode, RestW); + error -> [] + end, + HashRes = case trie_child(X, Node, "#") of + {ok, HashNode} -> trie_match_skip_any(X, HashNode, Words); + error -> [] + end, + ExactRes ++ StarRes ++ HashRes. + +trie_match_skip_any(X, Node, []) -> + trie_match(X, Node, []); +trie_match_skip_any(X, Node, [_ | RestW] = Words) -> + trie_match(X, Node, Words) ++ + trie_match_skip_any(X, Node, RestW). + +follow_down(X, Words) -> + follow_down(X, root, Words). +follow_down(_X, CurNode, []) -> + {ok, CurNode}; +follow_down(X, CurNode, [W | RestW]) -> + case trie_child(X, CurNode, W) of + {ok, NextNode} -> follow_down(X, NextNode, RestW); + error -> {error, CurNode, [W | RestW]} + end. + +follow_down_create(X, Words) -> + case follow_down(X, Words) of + {ok, FinalNode} -> + FinalNode; + {error, Node, RestW} -> + lists:foldl( + fun(W, CurNode) -> + NewNode = new_node(), + trie_add_edge(X, CurNode, NewNode, W), + NewNode + end, Node, RestW) + end. + +follow_down_get_path(X, Words) -> + follow_down_get_path(X, root, Words, [{root, none}]). +follow_down_get_path(_, _, [], PathAcc) -> + PathAcc; +follow_down_get_path(X, CurNode, [W | RestW], PathAcc) -> + {ok, NextNode} = trie_child(X, CurNode, W), + follow_down_get_path(X, NextNode, RestW, [{NextNode, W} | PathAcc]). + +remove_path_if_empty(_, [{root, none}]) -> + ok; +remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> + case trie_has_any_bindings(X, Node) orelse + trie_has_any_children(X, Node) of + true -> ok; + false -> trie_remove_edge(X, Parent, Node, W), + remove_path_if_empty(X, RestPath) + end. 
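%% Illustration of the resulting storage (node ids are invented; real ones come
%% from new_node/0): binding queue Q to key <<"a.b">> on exchange X through
%% add_binding/2 above writes one trie edge per word plus a leaf binding,
%% roughly these mnesia rows:
%%
%%   #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
%%                                           node_id = root, word = "a"},
%%                    node_id = N1}
%%   #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
%%                                           node_id = N1, word = "b"},
%%                    node_id = N2}
%%   #topic_trie_binding{trie_binding = #trie_binding{exchange_name = X,
%%                                                    node_id = N2,
%%                                                    queue_name = Q}}
%%
%% remove_binding/1 deletes the leaf binding and then prunes any edges left
%% with no bindings and no children on the way back up the path.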
+ +trie_child(X, Node, Word) -> + Query = qlc:q([NextNode || + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X1, + node_id = Node1, + word = Word1}, + node_id = NextNode} + <- mnesia:table(rabbit_topic_trie_edge), + X1 == X, + Node1 == Node, + Word1 == Word]), + case qlc:e(Query) of + [NextNode] -> {ok, NextNode}; + [] -> error + end. + +trie_bindings(X, Node) -> + MatchHead = #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + queue_name = '$1'}}, + mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]). + +trie_add_edge(X, FromNode, ToNode, W) -> + trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3). +trie_remove_edge(X, FromNode, ToNode, W) -> + trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3). +trie_edge_op(X, FromNode, ToNode, W, Op) -> + ok = Op(rabbit_topic_trie_edge, + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + node_id = FromNode, + word = W}, + node_id = ToNode}, + write). + +trie_add_binding(X, Node, Q) -> + trie_binding_op(X, Node, Q, fun mnesia:write/3). +trie_remove_binding(X, Node, Q) -> + trie_binding_op(X, Node, Q, fun mnesia:delete_object/3). +trie_binding_op(X, Node, Q, Op) -> + ok = Op(rabbit_topic_trie_binding, + #topic_trie_binding{trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + queue_name = Q}}, + write). + +trie_has_any_children(X, Node) -> + MatchHead = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + node_id = Node, + word = '$1'}, + _='_'}, + Select = mnesia:select(rabbit_topic_trie_edge, + [{MatchHead, [], ['$1']}], 1, read), + select_while_no_result(Select) /= '$end_of_table'. + +trie_has_any_bindings(X, Node) -> + MatchHead = #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + queue_name = '$1'}, + _='_'}, + Select = mnesia:select(rabbit_topic_trie_binding, + [{MatchHead, [], ['$1']}], 1, read), + select_while_no_result(Select) /= '$end_of_table'. + +select_while_no_result({[], Cont}) -> + select_while_no_result(mnesia:select(Cont)); +select_while_no_result(Other) -> + Other. + +trie_remove_all_edges(X) -> + Query = qlc:q([Entry || + Entry = #topic_trie_edge{ + trie_edge = #trie_edge{exchange_name = X1, + _='_'}, + _='_'} + <- mnesia:table(rabbit_topic_trie_edge), + X1 == X]), + lists:foreach( + fun(O) -> mnesia:delete_object(rabbit_topic_trie_edge, O, write) end, + qlc:e(Query)). + +trie_remove_all_bindings(X) -> + Query = qlc:q([Entry || + Entry = #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X1, + _='_'}, + _='_'} + <- mnesia:table(rabbit_topic_trie_binding), + X1 == X]), + lists:foreach( + fun(O) -> mnesia:delete_object(rabbit_topic_trie_binding, O, write) end, + qlc:e(Query)). + +new_node() -> + now(). % UUID + +split_topic_key(Key) -> + string:tokens(binary_to_list(Key), "."). 
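%% Usage sketch (exchange and key are illustrative): routing asks the trie which
%% queue names match a delivery's key; split_topic_key(<<"a.b.c">>) yields
%% ["a", "b", "c"] and split_topic_key(<<>>) yields []. Since which_matches/2
%% may return duplicates, callers that need a set can usort the result.

example_topic_route(VHostBin) ->
    XName = rabbit_misc:r(VHostBin, exchange, <<"my.topic">>),
    QNames = rabbit_exchange_type_topic:which_matches(XName, <<"a.b.c">>),
    lists:usort(QNames).
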
diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4a5adfae..37708b22 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -194,6 +194,17 @@ table_definitions() -> {type, ordered_set}, {match, #reverse_route{reverse_binding = reverse_binding_match(), _='_'}}]}, + {rabbit_topic_trie_edge, + [{record_name, topic_trie_edge}, + {attributes, record_info(fields, topic_trie_edge)}, + {type, ordered_set}, + {match, #topic_trie_edge{trie_edge = trie_edge_match(), _='_'}}]}, + {rabbit_topic_trie_binding, + [{record_name, topic_trie_binding}, + {attributes, record_info(fields, topic_trie_binding)}, + {type, ordered_set}, + {match, #topic_trie_binding{trie_binding = trie_binding_match(), + _='_'}}]}, %% Consider the implications to nodes_of_type/1 before altering %% the next entry. {rabbit_durable_exchange, @@ -223,6 +234,12 @@ reverse_binding_match() -> #reverse_binding{queue_name = queue_name_match(), exchange_name = exchange_name_match(), _='_'}. +trie_edge_match() -> + #trie_edge{exchange_name = exchange_name_match(), + _='_'}. +trie_binding_match() -> + #trie_edge{exchange_name = exchange_name_match(), + _='_'}. exchange_name_match() -> resource_match(exchange). queue_name_match() -> diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index ec049a1a..d7d6d0ad 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -34,6 +34,7 @@ -include("rabbit.hrl"). -export([deliver/2, + deliver_by_queue_names/2, match_bindings/2, match_routing_key/2]). @@ -48,6 +49,8 @@ -spec(deliver/2 :: ([pid()], rabbit_types:delivery()) -> {routing_result(), [pid()]}). +-spec(deliver_by_queue_names/2 :: + ([binary()], rabbit_types:delivery()) -> {routing_result(), [pid()]}). -endif. @@ -77,6 +80,9 @@ deliver(QPids, Delivery) -> check_delivery(Delivery#delivery.mandatory, Delivery#delivery.immediate, {Routed, Handled}). +deliver_by_queue_names(Qs, Delivery) -> + deliver(lookup_qpids(Qs), Delivery). + %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same exchange match_bindings(Name, Match) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 082e7877..1e7f533a 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -584,30 +584,100 @@ sequence_with_content(Sequence) -> rabbit_framing_amqp_0_9_1), Sequence). -test_topic_match(P, R) -> - test_topic_match(P, R, true). - -test_topic_match(P, R, Expected) -> - case rabbit_exchange_type_topic:topic_matches(list_to_binary(P), - list_to_binary(R)) of - Expected -> - passed; - _ -> - {topic_match_failure, P, R} - end. +test_topic_expect_match(#exchange{name = XName}, List) -> + lists:foreach( + fun({Key, Expected}) -> + Res = rabbit_exchange_type_topic:which_matches( + XName, list_to_binary(Key)), + ExpectedRes = lists:map( + fun(Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + end, List). 
test_topic_matching() -> - passed = test_topic_match("#", "test.test"), - passed = test_topic_match("#", ""), - passed = test_topic_match("#.T.R", "T.T.R"), - passed = test_topic_match("#.T.R", "T.R.T.R"), - passed = test_topic_match("#.Y.Z", "X.Y.Z.X.Y.Z"), - passed = test_topic_match("#.test", "test"), - passed = test_topic_match("#.test", "test.test"), - passed = test_topic_match("#.test", "ignored.test"), - passed = test_topic_match("#.test", "more.ignored.test"), - passed = test_topic_match("#.test", "notmatched", false), - passed = test_topic_match("#.z", "one.two.three.four", false), + XName = #resource{virtual_host = <<"/">>, + kind = exchange, + name = <<"test_exchange">>}, + X = #exchange{name = XName, type = topic, durable = false, + auto_delete = false, arguments = []}, + %% create + rabbit_exchange_type_topic:validate(X), + rabbit_exchange_type_topic:create(X), + + %% add some bindings + Bindings = lists:map( + fun({Key, Q}) -> + #binding{exchange_name = XName, + key = list_to_binary(Key), + queue_name = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} + end, [{"a.b.c", "t1"}, + {"a.*.c", "t2"}, + {"a.#.b", "t3"}, + {"a.b.b.c", "t4"}, + {"#", "t5"}, + {"#.#", "t6"}, + {"#.b", "t7"}, + {"*.*", "t8"}, + {"a.*", "t9"}, + {"*.b.c", "t10"}, + {"a.#", "t11"}, + {"a.#.#", "t12"}, + {"b.b.c", "t13"}, + {"a.b.b", "t14"}, + {"a.b", "t15"}, + {"b.c", "t16"}, + {"", "t17"}, + {"*.*.*", "t18"}, + {"vodka.martini", "t19"}, + {"a.b.c", "t20"}]), + lists:foreach(fun(B) -> rabbit_exchange_type_topic:add_binding(X, B) end, + Bindings), + + %% test some matches + test_topic_expect_match(X, + [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", "t18", "t20"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", "t12", "t15"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", "t18"]}, + {"", ["t5", "t6", "t17"]}, + {"b.c.c", ["t5", "t6", "t18"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12"]}, + {"vodka.gin", ["t5", "t6", "t8"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18"]}, + {"nothing.here.at.all", ["t5", "t6"]}, + {"un_der_sc.ore", ["t5", "t6", "t8"]}]), + + %% remove some bindings + RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), + lists:nth(11, Bindings)], + rabbit_exchange_type_topic:remove_bindings(X, RemovedBindings), + RemainingBindings = ordsets:to_list( + ordsets:subtract(ordsets:from_list(Bindings), + ordsets:from_list(RemovedBindings))), + + %% test some matches + test_topic_expect_match(X, + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18"]}, + {"", ["t6", "t17"]}, + {"b.c.c", ["t6", "t18"]}, + {"a.a.a.a.a", ["t6", "t12"]}, + {"vodka.gin", ["t6", "t8"]}, + {"vodka.martini", ["t6", "t8", "t19"]}, + {"b.b.c", ["t6", "t10", "t13", "t18"]}, + {"nothing.here.at.all", ["t6"]}, + {"un_der_sc.ore", ["t6", "t8"]}]), + + %% remove the entire exchange + rabbit_exchange_type_topic:delete(X, RemainingBindings), + %% none should match now + test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. 
test_app_management() -> -- cgit v1.2.1 From 2ae1ddd84c5896bd8888d807533fcfcbd5ad303f Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 26 Aug 2010 19:12:36 +0100 Subject: minor cosmetic --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 35f25ccb..e2114b5d 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -262,7 +262,7 @@ trie_remove_all_bindings(X) -> Query = qlc:q([Entry || Entry = #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X1, - _='_'}, + _='_'}, _='_'} <- mnesia:table(rabbit_topic_trie_binding), X1 == X]), -- cgit v1.2.1 From 21a74891c40dee991f0cd0084f7cd7e49e331b60 Mon Sep 17 00:00:00 2001 From: Vlad Ionescu Date: Mon, 27 Sep 2010 19:44:44 +0100 Subject: using rabbit_guid; cosmetic --- src/rabbit_exchange_type_topic.erl | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index e2114b5d..8e6918d0 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -133,8 +133,7 @@ trie_match(X, Node, [W | RestW] = Words) -> trie_match_skip_any(X, Node, []) -> trie_match(X, Node, []); trie_match_skip_any(X, Node, [_ | RestW] = Words) -> - trie_match(X, Node, Words) ++ - trie_match_skip_any(X, Node, RestW). + trie_match(X, Node, Words) ++ trie_match_skip_any(X, Node, RestW). follow_down(X, Words) -> follow_down(X, root, Words). @@ -148,15 +147,13 @@ follow_down(X, CurNode, [W | RestW]) -> follow_down_create(X, Words) -> case follow_down(X, Words) of - {ok, FinalNode} -> - FinalNode; - {error, Node, RestW} -> - lists:foldl( - fun(W, CurNode) -> - NewNode = new_node(), - trie_add_edge(X, CurNode, NewNode, W), - NewNode - end, Node, RestW) + {ok, FinalNode} -> FinalNode; + {error, Node, RestW} -> lists:foldl( + fun(W, CurNode) -> + NewNode = new_node(), + trie_add_edge(X, CurNode, NewNode, W), + NewNode + end, Node, RestW) end. follow_down_get_path(X, Words) -> @@ -271,7 +268,7 @@ trie_remove_all_bindings(X) -> qlc:e(Query)). new_node() -> - now(). % UUID + rabbit_guid:guid(). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). -- cgit v1.2.1 From dc18cde5fa3949beb48b0dd816220cd7f865bd07 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Sep 2010 17:20:08 +0100 Subject: Add necessary additional barrier bootstep --- src/rabbit.erl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/rabbit.erl b/src/rabbit.erl index 8c36a9f0..83b98669 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -44,26 +44,32 @@ %% Boot steps. -export([maybe_insert_default_data/0]). +-rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}). + -rabbit_boot_step({codec_correctness_check, [{description, "codec correctness check"}, {mfa, {rabbit_binary_generator, check_empty_content_body_frame_size, []}}, + {requires, pre_boot}, {enables, external_infrastructure}]}). -rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, + {requires, pre_boot}, {enables, external_infrastructure}]}). -rabbit_boot_step({file_handle_cache, [{description, "file handle cache server"}, {mfa, {rabbit_sup, start_restartable_child, [file_handle_cache]}}, + {requires, pre_boot}, {enables, worker_pool}]}). 
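%% The pattern this patch applies: steps that previously declared no
%% prerequisite now require pre_boot, giving the boot-step graph a single
%% explicit root. Another step wired the same way might look like this (module
%% and step names are invented for illustration):
%%
%%   -rabbit_boot_step({example_early_step,
%%                      [{description, "example early step"},
%%                       {mfa,         {example_module, start, []}},
%%                       {requires,    pre_boot},
%%                       {enables,     external_infrastructure}]}).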
-rabbit_boot_step({worker_pool, [{description, "worker pool"}, {mfa, {rabbit_sup, start_child, [worker_pool_sup]}}, + {requires, pre_boot}, {enables, external_infrastructure}]}). -rabbit_boot_step({external_infrastructure, -- cgit v1.2.1 From 75206c6124f4be2d6e578186eacc5fbc7f331d99 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Tue, 28 Sep 2010 17:33:00 +0100 Subject: using recursive split; more tests --- src/rabbit_exchange_type_topic.erl | 18 +++++++- src/rabbit_tests.erl | 89 ++++++++++++++++++++++---------------- 2 files changed, 69 insertions(+), 38 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 8e6918d0..bbd5d357 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -108,6 +108,7 @@ which_matches(X, Key) -> trie_match(X, Words) -> trie_match(X, root, Words). + trie_match(X, Node, []) -> FinalRes = trie_bindings(X, Node), HashRes = case trie_child(X, Node, "#") of @@ -137,6 +138,7 @@ trie_match_skip_any(X, Node, [_ | RestW] = Words) -> follow_down(X, Words) -> follow_down(X, root, Words). + follow_down(_X, CurNode, []) -> {ok, CurNode}; follow_down(X, CurNode, [W | RestW]) -> @@ -158,6 +160,7 @@ follow_down_create(X, Words) -> follow_down_get_path(X, Words) -> follow_down_get_path(X, root, Words, [{root, none}]). + follow_down_get_path(_, _, [], PathAcc) -> PathAcc; follow_down_get_path(X, CurNode, [W | RestW], PathAcc) -> @@ -198,8 +201,10 @@ trie_bindings(X, Node) -> trie_add_edge(X, FromNode, ToNode, W) -> trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3). + trie_remove_edge(X, FromNode, ToNode, W) -> trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3). + trie_edge_op(X, FromNode, ToNode, W, Op) -> ok = Op(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, @@ -210,8 +215,10 @@ trie_edge_op(X, FromNode, ToNode, W, Op) -> trie_add_binding(X, Node, Q) -> trie_binding_op(X, Node, Q, fun mnesia:write/3). + trie_remove_binding(X, Node, Q) -> trie_binding_op(X, Node, Q, fun mnesia:delete_object/3). + trie_binding_op(X, Node, Q, Op) -> ok = Op(rabbit_topic_trie_binding, #topic_trie_binding{trie_binding = #trie_binding{exchange_name = X, @@ -271,4 +278,13 @@ new_node() -> rabbit_guid:guid(). split_topic_key(Key) -> - string:tokens(binary_to_list(Key), "."). + split_topic_key(Key, [], []). + +split_topic_key(<<>>, [], []) -> + []; +split_topic_key(<<>>, RevWordAcc, RevResAcc) -> + lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]); +split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) -> + split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]); +split_topic_key(<>, RevWordAcc, RevResAcc) -> + split_topic_key(Rest, [C | RevWordAcc], RevResAcc). diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index fb0faebb..0308f539 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -581,19 +581,6 @@ sequence_with_content(Sequence) -> rabbit_framing_amqp_0_9_1), Sequence). -test_topic_expect_match(#exchange{name = XName}, List) -> - lists:foreach( - fun({Key, Expected}) -> - Res = rabbit_exchange_type_topic:which_matches( - XName, list_to_binary(Key)), - ExpectedRes = lists:map( - fun(Q) -> #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)} - end, Expected), - true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) - end, List). 
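%% A point the new test patterns below exercise: '#' may match zero words,
%% while '*' always consumes exactly one. That is why, in the expectations that
%% follow, the empty routing key is matched by "#" (t5), "#.#" (t6), "#.#.#"
%% (t24) and the empty binding itself (t17), but by none of the patterns that
%% contain a '*':
%%
%%   {"", ["t5", "t6", "t17", "t24"]}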
- test_topic_matching() -> XName = #resource{virtual_host = <<"/">>, kind = exchange, @@ -631,27 +618,37 @@ test_topic_matching() -> {"", "t17"}, {"*.*.*", "t18"}, {"vodka.martini", "t19"}, - {"a.b.c", "t20"}]), + {"a.b.c", "t20"}, + {"*.#", "t21"}, + {"#.*.#", "t22"}, + {"*.#.#", "t23"}, + {"#.#.#", "t24"}, + {"*", "t25"}]), lists:foreach(fun(B) -> rabbit_exchange_type_topic:add_binding(X, B) end, Bindings), %% test some matches test_topic_expect_match(X, - [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", "t18", "t20"]}, - {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", "t12", "t15"]}, - {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", "t18"]}, - {"", ["t5", "t6", "t17"]}, - {"b.c.c", ["t5", "t6", "t18"]}, - {"a.a.a.a.a", ["t5", "t6", "t11", "t12"]}, - {"vodka.gin", ["t5", "t6", "t8"]}, - {"vodka.martini", ["t5", "t6", "t8", "t19"]}, - {"b.b.c", ["t5", "t6", "t10", "t13", "t18"]}, - {"nothing.here.at.all", ["t5", "t6"]}, - {"un_der_sc.ore", ["t5", "t6", "t8"]}]), + [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", "t18", "t20", + "t21", "t22", "t23", "t24"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", "t12", "t15", + "t21", "t22", "t23", "t24"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", "t18", "t21", + "t22", "t23", "t24"]}, + {"", ["t5", "t6", "t17", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", "t24"]}, + {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", "t24"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", + "t24"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", "t23", + "t24"]}, + {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, + {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", "t25"]}]), %% remove some bindings RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), - lists:nth(11, Bindings)], + lists:nth(11, Bindings), lists:nth(21, Bindings)], rabbit_exchange_type_topic:remove_bindings(X, RemovedBindings), RemainingBindings = ordsets:to_list( ordsets:subtract(ordsets:from_list(Bindings), @@ -659,17 +656,20 @@ test_topic_matching() -> %% test some matches test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18"]}, - {"", ["t6", "t17"]}, - {"b.c.c", ["t6", "t18"]}, - {"a.a.a.a.a", ["t6", "t12"]}, - {"vodka.gin", ["t6", "t8"]}, - {"vodka.martini", ["t6", "t8", "t19"]}, - {"b.b.c", ["t6", "t10", "t13", "t18"]}, - {"nothing.here.at.all", ["t6"]}, - {"un_der_sc.ore", ["t6", "t8"]}]), + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", "t23", + "t24"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", "t22", "t23", + "t24"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", "t23", + "t24"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t19", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", "t24"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange rabbit_exchange_type_topic:delete(X, RemainingBindings), @@ -677,6 +677,21 @@ test_topic_matching() -> test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. 
+test_topic_expect_match(#exchange{name = XName}, List) -> + lists:foreach( + fun({Key, Expected}) -> + io:format("~p ~p~n", [Key, Expected]), + Res = rabbit_exchange_type_topic:which_matches( + XName, list_to_binary(Key)), + io:format("Res: ~p~n", [Res]), + ExpectedRes = lists:map( + fun(Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + end, List). + test_app_management() -> %% starting, stopping, status ok = control_action(stop_app, []), -- cgit v1.2.1 From 6db65737ad2ad3d67d50a1b5adc0cfa28b999029 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 29 Sep 2010 19:36:24 +0100 Subject: using mnesia:read rather than qlc --- src/rabbit_exchange_type_topic.erl | 17 +++++------------ src/rabbit_tests.erl | 4 +--- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index bbd5d357..9091d385 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -178,18 +178,11 @@ remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> end. trie_child(X, Node, Word) -> - Query = qlc:q([NextNode || - #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X1, - node_id = Node1, - word = Word1}, - node_id = NextNode} - <- mnesia:table(rabbit_topic_trie_edge), - X1 == X, - Node1 == Node, - Word1 == Word]), - case qlc:e(Query) of - [NextNode] -> {ok, NextNode}; - [] -> error + case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, + node_id = Node, + word = Word}) of + [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; + [] -> error end. trie_bindings(X, Node) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 53aeea7f..cee728ea 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -673,16 +673,14 @@ test_topic_matching() -> %% remove the entire exchange rabbit_exchange_type_topic:delete(X, RemainingBindings), %% none should match now - test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), + test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. test_topic_expect_match(#exchange{name = XName}, List) -> lists:foreach( fun({Key, Expected}) -> - io:format("~p ~p~n", [Key, Expected]), Res = rabbit_exchange_type_topic:which_matches( XName, list_to_binary(Key)), - io:format("Res: ~p~n", [Res]), ExpectedRes = lists:map( fun(Q) -> #resource{virtual_host = <<"/">>, kind = queue, -- cgit v1.2.1 From acc77d592b4b13c25a8d147f5889d7703c3fd401 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 30 Sep 2010 15:20:58 +0100 Subject: minor --- src/rabbit_exchange_type_topic.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 9091d385..078bacb6 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -222,20 +222,20 @@ trie_binding_op(X, Node, Q, Op) -> trie_has_any_children(X, Node) -> MatchHead = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, node_id = Node, - word = '$1'}, + _='_'}, _='_'}, Select = mnesia:select(rabbit_topic_trie_edge, - [{MatchHead, [], ['$1']}], 1, read), + [{MatchHead, [], ['$_']}], 1, read), select_while_no_result(Select) /= '$end_of_table'. 
trie_has_any_bindings(X, Node) -> MatchHead = #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X, node_id = Node, - queue_name = '$1'}, + _='_'}, _='_'}, Select = mnesia:select(rabbit_topic_trie_binding, - [{MatchHead, [], ['$1']}], 1, read), + [{MatchHead, [], ['$_']}], 1, read), select_while_no_result(Select) /= '$end_of_table'. select_while_no_result({[], Cont}) -> -- cgit v1.2.1 From aee4d8dac6bc4c1b1caafd87b41453c61f6b225f Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 30 Sep 2010 16:06:20 +0100 Subject: avoid using qlc --- src/rabbit_exchange_type_topic.erl | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 078bacb6..15ce487d 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,7 +30,7 @@ %% -module(rabbit_exchange_type_topic). --include_lib("stdlib/include/qlc.hrl"). + -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). @@ -152,7 +152,7 @@ follow_down_create(X, Words) -> {ok, FinalNode} -> FinalNode; {error, Node, RestW} -> lists:foldl( fun(W, CurNode) -> - NewNode = new_node(), + NewNode = new_node_id(), trie_add_edge(X, CurNode, NewNode, W), NewNode end, Node, RestW) @@ -244,30 +244,23 @@ select_while_no_result(Other) -> Other. trie_remove_all_edges(X) -> - Query = qlc:q([Entry || - Entry = #topic_trie_edge{ - trie_edge = #trie_edge{exchange_name = X1, - _='_'}, - _='_'} - <- mnesia:table(rabbit_topic_trie_edge), - X1 == X]), + MatchHead = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + _='_'}, + _='_'}, lists:foreach( - fun(O) -> mnesia:delete_object(rabbit_topic_trie_edge, O, write) end, - qlc:e(Query)). - + fun(R) -> mnesia:delete_object(rabbit_topic_trie_edge, R, write) end, + mnesia:select(rabbit_topic_trie_edge, [{MatchHead, [], ['$_']}])). + trie_remove_all_bindings(X) -> - Query = qlc:q([Entry || - Entry = #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X1, - _='_'}, - _='_'} - <- mnesia:table(rabbit_topic_trie_binding), - X1 == X]), + MatchHead = #topic_trie_binding{trie_binding = + #trie_binding{exchange_name = X, + _='_'}, + _='_'}, lists:foreach( - fun(O) -> mnesia:delete_object(rabbit_topic_trie_binding, O, write) end, - qlc:e(Query)). + fun(R) -> mnesia:delete_object(rabbit_topic_trie_binding, R, write) end, + mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$_']}])). -new_node() -> +new_node_id() -> rabbit_guid:guid(). split_topic_key(Key) -> -- cgit v1.2.1 From cac0907c331a99082bbe0c5f0bc4eba990c3b98b Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 30 Sep 2010 20:09:28 +0100 Subject: using mnesia:match_object when selecting all records with a given pattern --- src/rabbit_exchange_type_topic.erl | 17 ++++++++--------- src/rabbit_tests.erl | 28 +++++++++++++++------------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 15ce487d..2c2e589a 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -244,21 +244,20 @@ select_while_no_result(Other) -> Other. 
trie_remove_all_edges(X) -> - MatchHead = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - _='_'}, - _='_'}, + Pattern = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + _='_'}, + _='_'}, lists:foreach( fun(R) -> mnesia:delete_object(rabbit_topic_trie_edge, R, write) end, - mnesia:select(rabbit_topic_trie_edge, [{MatchHead, [], ['$_']}])). + mnesia:match_object(rabbit_topic_trie_edge, Pattern, write)). trie_remove_all_bindings(X) -> - MatchHead = #topic_trie_binding{trie_binding = - #trie_binding{exchange_name = X, - _='_'}, - _='_'}, + Pattern = #topic_trie_binding{trie_binding = #trie_binding{exchange_name =X, + _='_'}, + _='_'}, lists:foreach( fun(R) -> mnesia:delete_object(rabbit_topic_trie_binding, R, write) end, - mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$_']}])). + mnesia:match_object(rabbit_topic_trie_binding, Pattern, write)). new_node_id() -> rabbit_guid:guid(). diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index cee728ea..32c31bbf 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -622,32 +622,34 @@ test_topic_matching() -> {"#.*.#", "t22"}, {"*.#.#", "t23"}, {"#.#.#", "t24"}, - {"*", "t25"}]), + {"*", "t25"}, + {"#.b.#", "t26"}]), lists:foreach(fun(B) -> rabbit_exchange_type_topic:add_binding(X, B) end, Bindings), %% test some matches test_topic_expect_match(X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", "t18", "t20", - "t21", "t22", "t23", "t24"]}, + "t21", "t22", "t23", "t24", "t26"]}, {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", "t12", "t15", - "t21", "t22", "t23", "t24"]}, + "t21", "t22", "t23", "t24", "t26"]}, {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", "t18", "t21", - "t22", "t23", "t24"]}, + "t22", "t23", "t24", "t26"]}, {"", ["t5", "t6", "t17", "t24"]}, - {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24", "t26"]}, {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", "t24"]}, {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", "t24"]}, {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", "t24"]}, {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", "t23", - "t24"]}, + "t24", "t26"]}, {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", "t25"]}]), %% remove some bindings RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), - lists:nth(11, Bindings), lists:nth(21, Bindings)], + lists:nth(11, Bindings), lists:nth(19, Bindings), + lists:nth(21, Bindings)], rabbit_exchange_type_topic:remove_bindings(X, RemovedBindings), RemainingBindings = ordsets:to_list( ordsets:subtract(ordsets:from_list(Bindings), @@ -656,17 +658,17 @@ test_topic_matching() -> %% test some matches test_topic_expect_match(X, [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", "t23", - "t24"]}, + "t24", "t26"]}, {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", "t22", "t23", - "t24"]}, + "t24", "t26"]}, {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", "t23", - "t24"]}, + "t24", "t26"]}, {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t19", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", 
"t10", "t13", "t18", "t22", "t23", "t24", "t26"]}, {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), -- cgit v1.2.1 From eba47400be1446e878ddfb6ec6799b2bd2d712b2 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Tue, 5 Oct 2010 11:41:39 +0100 Subject: cosmetics --- src/rabbit_exchange_type_topic.erl | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2c2e589a..486c4f80 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -72,21 +72,21 @@ create(_X) -> ok. recover(_X, _Bs) -> ok. delete(#exchange{name = X}, _Bs) -> - rabbit_misc:execute_mnesia_transaction(fun() -> trie_remove_all_edges(X), - trie_remove_all_bindings(X) + rabbit_misc:execute_mnesia_transaction(fun () -> trie_remove_all_edges(X), + trie_remove_all_bindings(X) end), ok. add_binding(_Exchange, #binding{exchange_name = X, key = K, queue_name = Q}) -> rabbit_misc:execute_mnesia_transaction( - fun() -> FinalNode = follow_down_create(X, split_topic_key(K)), - trie_add_binding(X, FinalNode, Q) + fun () -> FinalNode = follow_down_create(X, split_topic_key(K)), + trie_add_binding(X, FinalNode, Q) end), ok. remove_bindings(_X, Bs) -> rabbit_misc:execute_mnesia_transaction( - fun() -> lists:foreach(fun remove_binding/1, Bs) end), + fun () -> lists:foreach(fun remove_binding/1, Bs) end), ok. remove_binding(#binding{exchange_name = X, key = K, queue_name = Q}) -> @@ -151,10 +151,10 @@ follow_down_create(X, Words) -> case follow_down(X, Words) of {ok, FinalNode} -> FinalNode; {error, Node, RestW} -> lists:foldl( - fun(W, CurNode) -> - NewNode = new_node_id(), - trie_add_edge(X, CurNode, NewNode, W), - NewNode + fun (W, CurNode) -> + NewNode = new_node_id(), + trie_add_edge(X, CurNode, NewNode, W), + NewNode end, Node, RestW) end. @@ -170,8 +170,7 @@ follow_down_get_path(X, CurNode, [W | RestW], PathAcc) -> remove_path_if_empty(_, [{root, none}]) -> ok; remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> - case trie_has_any_bindings(X, Node) orelse - trie_has_any_children(X, Node) of + case trie_has_any_bindings(X, Node) orelse trie_has_any_children(X, Node) of true -> ok; false -> trie_remove_edge(X, Parent, Node, W), remove_path_if_empty(X, RestPath) @@ -248,7 +247,7 @@ trie_remove_all_edges(X) -> _='_'}, _='_'}, lists:foreach( - fun(R) -> mnesia:delete_object(rabbit_topic_trie_edge, R, write) end, + fun (R) -> mnesia:delete_object(rabbit_topic_trie_edge, R, write) end, mnesia:match_object(rabbit_topic_trie_edge, Pattern, write)). trie_remove_all_bindings(X) -> @@ -256,7 +255,7 @@ trie_remove_all_bindings(X) -> _='_'}, _='_'}, lists:foreach( - fun(R) -> mnesia:delete_object(rabbit_topic_trie_binding, R, write) end, + fun (R) -> mnesia:delete_object(rabbit_topic_trie_binding, R, write) end, mnesia:match_object(rabbit_topic_trie_binding, Pattern, write)). 
new_node_id() -> -- cgit v1.2.1 From 45a97a0535d43f2556cb8caa39af0ff00538ea5c Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 14 Oct 2010 20:26:19 +0100 Subject: fixing merge conflicts --- include/rabbit.hrl | 2 +- src/rabbit_exchange_type_topic.erl | 45 ++++++++++++++------------------------ src/rabbit_router.erl | 3 --- src/rabbit_tests.erl | 36 ++++++++++++++++-------------- 4 files changed, 36 insertions(+), 50 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 59e39d92..282be001 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -67,7 +67,7 @@ -record(topic_trie_binding, {trie_binding, value = const}). -record(trie_edge, {exchange_name, node_id, word}). --record(trie_binding, {exchange_name, node_id, queue_name}). +-record(trie_binding, {exchange_name, node_id, destination}). -record(listener, {node, protocol, host, port}). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 89598958..3b0f1505 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -47,25 +47,17 @@ {requires, rabbit_exchange_type_registry}, {enables, kernel_ready}]}). --export([which_matches/2]). - --ifdef(use_specs). - --spec(which_matches/2 :: - (rabbit_exchange:name(), rabbit_router:routing_key()) -> - [rabbit_amqqueue:name()]). - --endif. - %%---------------------------------------------------------------------------- description() -> [{name, <<"topic">>}, {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. -route(#exchange{name = X}, Delivery = - #delivery{message = #basic_message{routing_key = Key}}) -> - which_matches(X, Key). +%% NB: This may return duplicate results in some situations (that's ok) +route(#exchange{name = X}, + #delivery{message = #basic_message{routing_key = Key}}) -> + Words = split_topic_key(Key), + mnesia:async_dirty(fun trie_match/2, [X, Words]). validate(_X) -> ok. create(_X) -> ok. @@ -77,10 +69,10 @@ delete(#exchange{name = X}, _Bs) -> end), ok. -add_binding(_Exchange, #binding{exchange_name = X, key = K, queue_name = Q}) -> +add_binding(_Exchange, #binding{source = X, key = K, destination = D}) -> rabbit_misc:execute_mnesia_transaction( fun () -> FinalNode = follow_down_create(X, split_topic_key(K)), - trie_add_binding(X, FinalNode, Q) + trie_add_binding(X, FinalNode, D) end), ok. @@ -89,21 +81,16 @@ remove_bindings(_X, Bs) -> fun () -> lists:foreach(fun remove_binding/1, Bs) end), ok. -remove_binding(#binding{exchange_name = X, key = K, queue_name = Q}) -> +remove_binding(#binding{source = X, key = K, destination = D}) -> Path = follow_down_get_path(X, split_topic_key(K)), {FinalNode, _} = hd(Path), - trie_remove_binding(X, FinalNode, Q), + trie_remove_binding(X, FinalNode, D), remove_path_if_empty(X, Path), ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). -%% NB: This function may return duplicate results in some situations (that's ok) -which_matches(X, Key) -> - Words = split_topic_key(Key), - mnesia:async_dirty(fun trie_match/2, [X, Words]). - %%---------------------------------------------------------------------------- trie_match(X, Words) -> @@ -188,7 +175,7 @@ trie_bindings(X, Node) -> MatchHead = #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X, node_id = Node, - queue_name = '$1'}}, + destination = '$1'}}, mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]). 
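%% Note the renamed fields in this merge: a binding's exchange_name
%% becomes source and its queue_name becomes destination, so the trie
%% tables now record generic binding destinations rather than queue
%% names specifically.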
trie_add_edge(X, FromNode, ToNode, W) -> @@ -205,17 +192,17 @@ trie_edge_op(X, FromNode, ToNode, W, Op) -> node_id = ToNode}, write). -trie_add_binding(X, Node, Q) -> - trie_binding_op(X, Node, Q, fun mnesia:write/3). +trie_add_binding(X, Node, D) -> + trie_binding_op(X, Node, D, fun mnesia:write/3). -trie_remove_binding(X, Node, Q) -> - trie_binding_op(X, Node, Q, fun mnesia:delete_object/3). +trie_remove_binding(X, Node, D) -> + trie_binding_op(X, Node, D, fun mnesia:delete_object/3). -trie_binding_op(X, Node, Q, Op) -> +trie_binding_op(X, Node, D, Op) -> ok = Op(rabbit_topic_trie_binding, #topic_trie_binding{trie_binding = #trie_binding{exchange_name = X, node_id = Node, - queue_name = Q}}, + destination = D}}, write). trie_has_any_children(X, Node) -> diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 05bda8b0..00df1ce1 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -84,9 +84,6 @@ deliver(QNames, Delivery) -> check_delivery(Delivery#delivery.mandatory, Delivery#delivery.immediate, {Routed, Handled}). -deliver_by_queue_names(Qs, Delivery) -> - deliver(lookup_qpids(Qs), Delivery). - %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same source match_bindings(SrcName, Match) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 641fb6fb..4b3059be 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -592,12 +592,12 @@ test_topic_matching() -> %% add some bindings Bindings = lists:map( - fun({Key, Q}) -> - #binding{exchange_name = XName, - key = list_to_binary(Key), - queue_name = #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)}} + fun ({Key, Q}) -> + #binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} end, [{"a.b.c", "t1"}, {"a.*.c", "t2"}, {"a.#.b", "t3"}, @@ -624,7 +624,7 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]), - lists:foreach(fun(B) -> rabbit_exchange_type_topic:add_binding(X, B) end, + lists:foreach(fun (B) -> rabbit_exchange_type_topic:add_binding(X, B) end, Bindings), %% test some matches @@ -678,17 +678,19 @@ test_topic_matching() -> test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. -test_topic_expect_match(#exchange{name = XName}, List) -> +test_topic_expect_match(X, List) -> lists:foreach( - fun({Key, Expected}) -> - Res = rabbit_exchange_type_topic:which_matches( - XName, list_to_binary(Key)), - ExpectedRes = lists:map( - fun(Q) -> #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)} - end, Expected), - true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + fun ({Key, Expected}) -> + BinKey = list_to_binary(Key), + Res = rabbit_exchange_type_topic:route( + X, #delivery{message = #basic_message{routing_key = + BinKey}}), + ExpectedRes = lists:map( + fun (Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) end, List). test_app_management() -> -- cgit v1.2.1 From 4f7cf9e15982a14d8ec8518631950914a7a6262d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 29 Nov 2010 12:19:59 +0000 Subject: Added mechanism to dynamically choose the backing queue module. 
Ideally, this'll end up using the generic registry that's part of bug 23467 and allow plugins to register this stuff on boot --- src/rabbit_amqqueue_process.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index a999fe58..87d074c2 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -114,12 +114,11 @@ info_keys() -> ?INFO_KEYS. init(Q) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), - {ok, BQ} = application:get_env(backing_queue_module), {ok, #q{q = Q#amqqueue{pid = self()}, exclusive_consumer = none, has_had_consumers = false, - backing_queue = BQ, + backing_queue = backing_queue_module(Q), backing_queue_state = undefined, active_consumers = queue:new(), blocked_consumers = queue:new(), @@ -223,6 +222,13 @@ next_state(State) -> false -> {stop_sync_timer(State2), hibernate} end. +backing_queue_module(#amqqueue{arguments = Args}) -> + case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of + undefined -> {ok, BQM} = application:get_env(backing_queue_module), + BQM; + _Nodes -> rabbit_mirror_queue_master + end. + ensure_sync_timer(State = #q{sync_timer_ref = undefined, backing_queue = BQ}) -> {ok, TRef} = timer:apply_after( ?SYNC_INTERVAL, -- cgit v1.2.1 From d895717caa4cfb16a031e8d449404585950678bb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 29 Nov 2010 12:33:31 +0000 Subject: Present the whole amqqueue record to the BQ --- src/rabbit_amqqueue_process.erl | 4 ++-- src/rabbit_invariable_queue.erl | 2 +- src/rabbit_variable_queue.erl | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 87d074c2..2ae05300 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -150,7 +150,7 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- declare(Recover, From, - State = #q{q = Q = #amqqueue{name = QName, durable = IsDurable}, + State = #q{q = Q = #amqqueue{durable = IsDurable}, backing_queue = BQ, backing_queue_state = undefined, stats_timer = StatsTimer}) -> case rabbit_amqqueue:internal_declare(Q, Recover) of @@ -162,7 +162,7 @@ declare(Recover, From, ok = rabbit_memory_monitor:register( self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), - BQS = BQ:init(QName, IsDurable, Recover), + BQS = BQ:init(Q, IsDurable, Recover), State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), diff --git a/src/rabbit_invariable_queue.erl b/src/rabbit_invariable_queue.erl index 5a0532ea..a2ed13d5 100644 --- a/src/rabbit_invariable_queue.erl +++ b/src/rabbit_invariable_queue.erl @@ -64,7 +64,7 @@ start(DurableQueues) -> stop() -> ok = rabbit_sup:stop_child(rabbit_persister). -init(QName, IsDurable, Recover) -> +init(#amqqueue { name = QName }, IsDurable, Recover) -> Q = queue:from_list(case IsDurable andalso Recover of true -> rabbit_persister:queue_content(QName); false -> [] diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 5ac042a2..dd39a1c2 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -403,7 +403,7 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). 
-init(QueueName, IsDurable, false) -> +init(#amqqueue { name = QueueName }, IsDurable, false) -> IndexState = rabbit_queue_index:init(QueueName), init(IsDurable, IndexState, 0, [], case IsDurable of @@ -412,7 +412,7 @@ init(QueueName, IsDurable, false) -> end, msg_store_client_init(?TRANSIENT_MSG_STORE)); -init(QueueName, true, true) -> +init(#amqqueue { name = QueueName }, true, true) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of -- cgit v1.2.1 From 809e3999e60dd89e1e8e86a84695f45202232e11 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 29 Nov 2010 12:40:52 +0000 Subject: Present the whole amqqueue record to the BQ (remove duplicated IsDurable flag, correct BQ callbacks) --- include/rabbit_backing_queue_spec.hrl | 3 +-- src/rabbit_amqqueue_process.erl | 5 ++--- src/rabbit_backing_queue.erl | 2 +- src/rabbit_invariable_queue.erl | 4 ++-- src/rabbit_variable_queue.erl | 6 +++--- 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 20230b24..7b7f3885 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -43,8 +43,7 @@ -spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(init/3 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery()) -> - state()). +-spec(init/2 :: (rabbit_types:amqqueue(), attempt_recovery()) -> state()). -spec(terminate/1 :: (state()) -> state()). -spec(delete_and_terminate/1 :: (state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 2ae05300..08c688c7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -150,8 +150,7 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- declare(Recover, From, - State = #q{q = Q = #amqqueue{durable = IsDurable}, - backing_queue = BQ, backing_queue_state = undefined, + State = #q{q = Q, backing_queue = BQ, backing_queue_state = undefined, stats_timer = StatsTimer}) -> case rabbit_amqqueue:internal_declare(Q, Recover) of not_found -> {stop, normal, not_found, State}; @@ -162,7 +161,7 @@ declare(Recover, From, ok = rabbit_memory_monitor:register( self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), - BQS = BQ:init(Q, IsDurable, Recover), + BQS = BQ:init(Q, Recover), State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 352e76fd..7237f0ea 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -48,7 +48,7 @@ behaviour_info(callbacks) -> {stop, 0}, %% Initialise the backing queue and its state. - {init, 3}, + {init, 2}, %% Called on queue shutdown when queue isn't being deleted. {terminate, 1}, diff --git a/src/rabbit_invariable_queue.erl b/src/rabbit_invariable_queue.erl index a2ed13d5..41aff185 100644 --- a/src/rabbit_invariable_queue.erl +++ b/src/rabbit_invariable_queue.erl @@ -31,7 +31,7 @@ -module(rabbit_invariable_queue). 
--export([init/3, terminate/1, delete_and_terminate/1, purge/1, publish/3, +-export([init/2, terminate/1, delete_and_terminate/1, purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, tx_publish/4, tx_ack/3, dropwhile/2, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, @@ -64,7 +64,7 @@ start(DurableQueues) -> stop() -> ok = rabbit_sup:stop_child(rabbit_persister). -init(#amqqueue { name = QName }, IsDurable, Recover) -> +init(#amqqueue { name = QName, durable = IsDurable }, Recover) -> Q = queue:from_list(case IsDurable andalso Recover of true -> rabbit_persister:queue_content(QName); false -> [] diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index dd39a1c2..73a68ec3 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -31,7 +31,7 @@ -module(rabbit_variable_queue). --export([init/3, terminate/1, delete_and_terminate/1, +-export([init/2, terminate/1, delete_and_terminate/1, purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, @@ -403,7 +403,7 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). -init(#amqqueue { name = QueueName }, IsDurable, false) -> +init(#amqqueue { name = QueueName, durable = IsDurable }, false) -> IndexState = rabbit_queue_index:init(QueueName), init(IsDurable, IndexState, 0, [], case IsDurable of @@ -412,7 +412,7 @@ init(#amqqueue { name = QueueName }, IsDurable, false) -> end, msg_store_client_init(?TRANSIENT_MSG_STORE)); -init(#amqqueue { name = QueueName }, true, true) -> +init(#amqqueue { name = QueueName, durable = true }, true) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of -- cgit v1.2.1 From 5a2ad8926058366a3cd01dde6f8a9f96df1cf283 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 29 Nov 2010 16:37:03 +0000 Subject: Lesser of many evils... --- include/rabbit.hrl | 2 +- src/rabbit_amqqueue.erl | 11 ++++++----- src/rabbit_router.erl | 6 ++++-- src/rabbit_types.erl | 3 ++- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index a1987fb2..09cc3eb3 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -54,7 +54,7 @@ -record(exchange, {name, type, durable, auto_delete, arguments}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, - arguments, pid}). + arguments, pid, mirror_pids}). %% mnesia doesn't like unary records, so we add a dummy 'value' field -record(route, {binding, value = const}). 
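%% The new mirror_pids field on #amqqueue holds the pids of any
%% additional processes serving the same queue alongside the main queue
%% pid; the rabbit_router change below delivers to these pids as well.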
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index fa417544..5390bb86 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -203,12 +203,13 @@ recover_durable_queues(DurableQueues) -> declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), - Q = start_queue_process(#amqqueue{name = QueueName, - durable = Durable, - auto_delete = AutoDelete, - arguments = Args, + Q = start_queue_process(#amqqueue{name = QueueName, + durable = Durable, + auto_delete = AutoDelete, + arguments = Args, exclusive_owner = Owner, - pid = none}), + pid = none, + mirror_pids = []}), case gen_server2:call(Q#amqqueue.pid, {init, false}) of not_found -> rabbit_misc:not_found(QueueName); Q1 -> Q1 diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 00df1ce1..b1ce380b 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -116,7 +116,9 @@ check_delivery(_ , _ , {_ , Qs}) -> {routed, Qs}. lookup_qpids(QNames) -> lists:foldl(fun (QName, QPids) -> case mnesia:dirty_read({rabbit_queue, QName}) of - [#amqqueue{pid = QPid}] -> [QPid | QPids]; - [] -> QPids + [#amqqueue{pid = QPid, mirror_pids = Pids}] -> + Pids ++ [QPid | QPids]; + [] -> + QPids end end, [], QNames). diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index b9993823..3a0c0925 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -136,7 +136,8 @@ auto_delete :: boolean(), exclusive_owner :: rabbit_types:maybe(pid()), arguments :: rabbit_framing:amqp_table(), - pid :: rabbit_types:maybe(pid())}). + pid :: rabbit_types:maybe(pid()), + mirror_pids :: [pid()]}). -type(exchange() :: #exchange{name :: rabbit_exchange:name(), -- cgit v1.2.1 From b1eeb9af85fdd85a44d164f202c6879a74924cb6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 29 Nov 2010 17:20:26 +0000 Subject: Make the tests work again. Also mirror_pids => extra_pids on the basis that it could be reusable for other purposes --- include/rabbit.hrl | 2 +- src/rabbit_amqqueue.erl | 2 +- src/rabbit_router.erl | 4 ++-- src/rabbit_tests.erl | 14 +++++++++----- src/rabbit_types.erl | 2 +- 5 files changed, 14 insertions(+), 10 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 09cc3eb3..7bea4f52 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -54,7 +54,7 @@ -record(exchange, {name, type, durable, auto_delete, arguments}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, - arguments, pid, mirror_pids}). + arguments, pid, extra_pids}). %% mnesia doesn't like unary records, so we add a dummy 'value' field -record(route, {binding, value = const}). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 5390bb86..fd157231 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -209,7 +209,7 @@ declare(QueueName, Durable, AutoDelete, Args, Owner) -> arguments = Args, exclusive_owner = Owner, pid = none, - mirror_pids = []}), + extra_pids = []}), case gen_server2:call(Q#amqqueue.pid, {init, false}) of not_found -> rabbit_misc:not_found(QueueName); Q1 -> Q1 diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index b1ce380b..40e4edee 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -116,8 +116,8 @@ check_delivery(_ , _ , {_ , Qs}) -> {routed, Qs}. 
lookup_qpids(QNames) -> lists:foldl(fun (QName, QPids) -> case mnesia:dirty_read({rabbit_queue, QName}) of - [#amqqueue{pid = QPid, mirror_pids = Pids}] -> - Pids ++ [QPid | QPids]; + [#amqqueue{pid = QPid, extra_pids = EPids}] -> + EPids ++ [QPid | QPids]; [] -> QPids end diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 27e4d925..a63baddb 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1848,9 +1848,13 @@ assert_prop(List, Prop, Value) -> assert_props(List, PropVals) -> [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals]. +test_amqqueue(Durable) -> + #amqqueue{name = test_queue(), + durable = Durable}. + with_fresh_variable_queue(Fun) -> ok = empty_test_queue(), - VQ = rabbit_variable_queue:init(test_queue(), true, false), + VQ = rabbit_variable_queue:init(test_amqqueue(true), false), S0 = rabbit_variable_queue:status(VQ), assert_props(S0, [{q1, 0}, {q2, 0}, {delta, {delta, undefined, 0, undefined}}, @@ -2025,7 +2029,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true), + VQ7 = rabbit_variable_queue:init(test_amqqueue(true), true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), VQ9 = variable_queue_publish(false, 1, VQ8), @@ -2041,14 +2045,14 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:idle_timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true), + VQ7 = rabbit_variable_queue:init(test_amqqueue(true), true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. test_queue_recover() -> Count = 2 * rabbit_queue_index:next_segment_boundary(0), TxID = rabbit_guid:guid(), - {new, #amqqueue { pid = QPid, name = QName }} = + {new, #amqqueue { pid = QPid, name = QName } = Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), <<>>, #'P_basic'{delivery_mode = 2}, <<>>), @@ -2071,7 +2075,7 @@ test_queue_recover() -> {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = rabbit_amqqueue:basic_get(Q1, self(), false), exit(QPid1, shutdown), - VQ1 = rabbit_variable_queue:init(QName, true, true), + VQ1 = rabbit_variable_queue:init(Q, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 3a0c0925..4709532d 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -137,7 +137,7 @@ exclusive_owner :: rabbit_types:maybe(pid()), arguments :: rabbit_framing:amqp_table(), pid :: rabbit_types:maybe(pid()), - mirror_pids :: [pid()]}). + extra_pids :: [pid()]}). 
-type(exchange() :: #exchange{name :: rabbit_exchange:name(), -- cgit v1.2.1 From b0e19d5fd7803bfae1b7fe6a73d897f659f5151b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 30 Nov 2010 12:05:37 +0000 Subject: Exposing the publishing chpid right through to the BQ (and tidying up tests/types/specs) --- include/rabbit_backing_queue_spec.hrl | 14 ++++++++------ src/rabbit_amqqueue.erl | 9 +++++---- src/rabbit_amqqueue_process.erl | 12 ++++++------ src/rabbit_backing_queue.erl | 6 +++--- src/rabbit_invariable_queue.erl | 21 +++++++++++---------- src/rabbit_tests.erl | 8 ++++---- src/rabbit_variable_queue.erl | 15 ++++++++------- 7 files changed, 45 insertions(+), 40 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 7b7f3885..fd6d6b8a 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -47,18 +47,20 @@ -spec(terminate/1 :: (state()) -> state()). -spec(delete_and_terminate/1 :: (state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). --spec(publish/3 :: (rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) -> state()). --spec(publish_delivered/4 :: (ack_required(), rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) +-spec(publish/4 :: (rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) + -> state()). +-spec(publish_delivered/5 :: (ack_required(), rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) -> {ack(), state()}). -spec(dropwhile/2 :: (fun ((rabbit_types:message_properties()) -> boolean()), state()) -> state()). -spec(fetch/2 :: (ack_required(), state()) -> {fetch_result(), state()}). -spec(ack/2 :: ([ack()], state()) -> state()). --spec(tx_publish/4 :: (rabbit_types:txn(), rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) -> state()). +-spec(tx_publish/5 :: (rabbit_types:txn(), rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) -> + state()). -spec(tx_ack/3 :: (rabbit_types:txn(), [ack()], state()) -> state()). -spec(tx_rollback/2 :: (rabbit_types:txn(), state()) -> {[ack()], state()}). -spec(tx_commit/4 :: diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index fd157231..1bbe3f1c 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -499,11 +499,12 @@ delete_queue(QueueName) -> rabbit_binding:remove_transient_for_destination(QueueName). pseudo_queue(QueueName, Pid) -> - #amqqueue{name = QueueName, - durable = false, + #amqqueue{name = QueueName, + durable = false, auto_delete = false, - arguments = [], - pid = Pid}. + arguments = [], + pid = Pid, + extra_pids = []}. safe_delegate_call_ok(F, Pids) -> {_, Bad} = delegate:invoke(Pids, diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 08c688c7..1e45ef0b 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -438,7 +438,7 @@ run_message_queue(State) -> {_IsEmpty1, State2} = deliver_msgs_to_consumers(Funs, IsEmpty, State1), State2. -attempt_delivery(none, _ChPid, Message, State = #q{backing_queue = BQ}) -> +attempt_delivery(none, ChPid, Message, State = #q{backing_queue = BQ}) -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = fun (AckRequired, false, State1 = #q{backing_queue_state = BQS}) -> @@ -447,7 +447,7 @@ attempt_delivery(none, _ChPid, Message, State = #q{backing_queue = BQ}) -> %% message_properties. 
{AckTag, BQS1} = BQ:publish_delivered(AckRequired, Message, - ?BASE_MESSAGE_PROPERTIES, BQS), + ?BASE_MESSAGE_PROPERTIES, ChPid, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} end, @@ -455,9 +455,9 @@ attempt_delivery(none, _ChPid, Message, State = #q{backing_queue = BQ}) -> attempt_delivery(Txn, ChPid, Message, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> record_current_channel_tx(ChPid, Txn), - {true, - State#q{backing_queue_state = - BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS)}}. + {true, State#q{backing_queue_state = + BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, + ChPid, BQS)}}. deliver_or_enqueue(Txn, ChPid, Message, State = #q{backing_queue = BQ}) -> case attempt_delivery(Txn, ChPid, Message, State) of @@ -466,7 +466,7 @@ deliver_or_enqueue(Txn, ChPid, Message, State = #q{backing_queue = BQ}) -> {false, NewState} -> %% Txn is none and no unblocked channels with consumers BQS = BQ:publish(Message, - message_properties(State), + message_properties(State), ChPid, State #q.backing_queue_state), {false, ensure_ttl_timer(NewState#q{backing_queue_state = BQS})} end. diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 7237f0ea..d04944f9 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -62,12 +62,12 @@ behaviour_info(callbacks) -> {purge, 1}, %% Publish a message. - {publish, 3}, + {publish, 4}, %% Called for messages which have already been passed straight %% out to a client. The queue will be empty for these calls %% (i.e. saves the round trip through the backing queue). - {publish_delivered, 4}, + {publish_delivered, 5}, %% Drop messages from the head of the queue while the supplied %% predicate returns true. @@ -81,7 +81,7 @@ behaviour_info(callbacks) -> {ack, 2}, %% A publish, but in the context of a transaction. - {tx_publish, 4}, + {tx_publish, 5}, %% Acks, but in the context of a transaction. {tx_ack, 3}, diff --git a/src/rabbit_invariable_queue.erl b/src/rabbit_invariable_queue.erl index 41aff185..51819799 100644 --- a/src/rabbit_invariable_queue.erl +++ b/src/rabbit_invariable_queue.erl @@ -31,8 +31,8 @@ -module(rabbit_invariable_queue). --export([init/2, terminate/1, delete_and_terminate/1, purge/1, publish/3, - publish_delivered/4, fetch/2, ack/2, tx_publish/4, tx_ack/3, +-export([init/2, terminate/1, delete_and_terminate/1, purge/1, publish/4, + publish_delivered/5, fetch/2, ack/2, tx_publish/5, tx_ack/3, dropwhile/2, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, status/1]). @@ -100,17 +100,17 @@ purge(State = #iv_state { queue = Q, qname = QName, durable = IsDurable, ok = persist_acks(QName, IsDurable, none, AckTags, PA), {Len, State #iv_state { len = 0, queue = queue:new() }}. -publish(Msg, MsgProps, State = #iv_state { queue = Q, - qname = QName, - durable = IsDurable, - len = Len }) -> +publish(Msg, MsgProps, _ChPid, State = #iv_state { queue = Q, + qname = QName, + durable = IsDurable, + len = Len }) -> ok = persist_message(QName, IsDurable, none, Msg, MsgProps), State #iv_state { queue = enqueue(Msg, MsgProps, false, Q), len = Len + 1 }. 
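%% The publishing channel's pid is now threaded through publish/4,
%% publish_delivered/5 and tx_publish/5. The stock backing queues ignore
%% it (hence the _ChPid patterns), but an alternative backing queue
%% module can now see which channel published each message.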
-publish_delivered(false, _Msg, _MsgProps, State) -> +publish_delivered(false, _Msg, _MsgProps, _ChPid, State) -> {blank_ack, State}; publish_delivered(true, Msg = #basic_message { guid = Guid }, - MsgProps, + MsgProps, _ChPid, State = #iv_state { qname = QName, durable = IsDurable, len = 0, pending_ack = PA }) -> ok = persist_message(QName, IsDurable, none, Msg, MsgProps), @@ -159,8 +159,9 @@ ack(AckTags, State = #iv_state { qname = QName, durable = IsDurable, PA1 = remove_acks(AckTags, PA), State #iv_state { pending_ack = PA1 }. -tx_publish(Txn, Msg, MsgProps, State = #iv_state { qname = QName, - durable = IsDurable }) -> +tx_publish(Txn, Msg, MsgProps, _ChPid, + State = #iv_state { qname = QName, + durable = IsDurable }) -> Tx = #tx { pending_messages = Pubs } = lookup_tx(Txn), store_tx(Txn, Tx #tx { pending_messages = [{Msg, MsgProps} | Pubs] }), ok = persist_message(QName, IsDurable, Txn, Msg, MsgProps), diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index a63baddb..572f1457 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1830,7 +1830,7 @@ variable_queue_publish(IsPersistent, Count, VQ) -> true -> 2; false -> 1 end}, <<>>), - #message_properties{}, VQN) + #message_properties{}, self(), VQN) end, VQ, lists:seq(1, Count)). variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> @@ -1849,8 +1849,8 @@ assert_props(List, PropVals) -> [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals]. test_amqqueue(Durable) -> - #amqqueue{name = test_queue(), - durable = Durable}. + (rabbit_amqqueue:pseudo_queue(test_queue(), self())) + #amqqueue { durable = Durable }. with_fresh_variable_queue(Fun) -> ok = empty_test_queue(), @@ -1912,7 +1912,7 @@ test_dropwhile(VQ0) -> rabbit_basic:message( rabbit_misc:r(<<>>, exchange, <<>>), <<>>, #'P_basic'{}, <<>>), - #message_properties{expiry = N}, VQN) + #message_properties{expiry = N}, self(), VQN) end, VQ0, lists:seq(1, Count)), %% drop the first 5 messages diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 73a68ec3..cd4101fb 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -32,8 +32,8 @@ -module(rabbit_variable_queue). -export([init/2, terminate/1, delete_and_terminate/1, - purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, - tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, + purge/1, publish/4, publish_delivered/5, fetch/2, ack/2, + tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, @@ -501,14 +501,15 @@ purge(State = #vqstate { q4 = Q4, ram_index_count = 0, persistent_count = PCount1 })}. -publish(Msg, MsgProps, State) -> +publish(Msg, MsgProps, _ChPid, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). -publish_delivered(false, _Msg, _MsgProps, State = #vqstate { len = 0 }) -> +publish_delivered(false, _Msg, _MsgProps, _ChPid, + State = #vqstate { len = 0 }) -> {blank_ack, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent }, - MsgProps, + MsgProps, _ChPid, State = #vqstate { len = 0, next_seq_id = SeqId, out_counter = OutCount, @@ -640,8 +641,8 @@ ack(AckTags, State) -> AckTags, State)). 
tx_publish(Txn, Msg = #basic_message { is_persistent = IsPersistent }, MsgProps, - State = #vqstate { durable = IsDurable, - msg_store_clients = MSCState }) -> + _ChPid, State = #vqstate { durable = IsDurable, + msg_store_clients = MSCState }) -> Tx = #tx { pending_messages = Pubs } = lookup_tx(Txn), store_tx(Txn, Tx #tx { pending_messages = [{Msg, MsgProps} | Pubs] }), case IsPersistent andalso IsDurable of -- cgit v1.2.1 From 4e9f0dc61d494aded3e6600304215b40a3c8f32d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 30 Nov 2010 12:20:12 +0000 Subject: experiment with uing {active,once} instead of prim_inet:async_recv --- src/rabbit_net.erl | 10 +++++++++- src/rabbit_reader.erl | 52 +++++++++++++++++++++++++++++---------------------- 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl index 0940dce2..c9e3cc47 100644 --- a/src/rabbit_net.erl +++ b/src/rabbit_net.erl @@ -34,7 +34,7 @@ -export([async_recv/3, close/1, controlling_process/2, getstat/2, peername/1, peercert/1, port_command/2, - send/2, sockname/1, is_ssl/1]). + send/2, sockname/1, is_ssl/1, setopts/2]). %%--------------------------------------------------------------------------- @@ -69,6 +69,9 @@ -spec(getstat/2 :: (socket(), [stat_option()]) -> ok_val_or_error([{stat_option(), integer()}])). +-spec(setopts/2 :: (socket(), [{atom(), any()} | + {raw, non_neg_integer(), non_neg_integer(), + binary()}]) -> ok_or_any_error()). -endif. @@ -137,3 +140,8 @@ sockname(Sock) when is_port(Sock) -> is_ssl(Sock) -> ?IS_SSL(Sock). + +setopts(Sock, Options) when ?IS_SSL(Sock) -> + ssl:setopts(Sock#ssl_socket.ssl, Options); +setopts(Sock, Options) when is_port(Sock) -> + inet:setopts(Sock, Options). diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 4dd150a2..1d5b2021 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -54,9 +54,9 @@ %--------------------------------------------------------------------------- --record(v1, {parent, sock, connection, callback, recv_length, recv_ref, +-record(v1, {parent, sock, connection, callback, recv_length, pending_recv, connection_state, queue_collector, heartbeater, stats_timer, - channel_sup_sup_pid, start_heartbeat_fun}). + channel_sup_sup_pid, start_heartbeat_fun, buf}). -define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt, send_pend, state, channels]). @@ -275,7 +275,7 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, erlang:send_after(?HANDSHAKE_TIMEOUT * 1000, self(), handshake_timeout), try - mainloop(Deb, switch_callback( + recvloop(Deb, switch_callback( #v1{parent = Parent, sock = ClientSock, connection = #connection{ @@ -287,14 +287,15 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, client_properties = none}, callback = uninitialized_callback, recv_length = 0, - recv_ref = none, + pending_recv = false, connection_state = pre_init, queue_collector = Collector, heartbeater = none, stats_timer = rabbit_event:init_stats_timer(), channel_sup_sup_pid = ChannelSupSupPid, - start_heartbeat_fun = StartHeartbeatFun + start_heartbeat_fun = StartHeartbeatFun, + buf = [<<>>] }, handshake, 8)) catch @@ -317,21 +318,33 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, end, done. 
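%% With {active, once} the socket delivers a single {tcp, Sock, Data}
%% message and then stays quiet until rabbit_net:setopts/2 re-arms it;
%% the reworked reader below accumulates incoming data in buf and only
%% re-arms the socket while it holds fewer buffered bytes than the
%% current callback needs.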
-mainloop(Deb, State = #v1{parent = Parent, sock= Sock, recv_ref = Ref}) -> +recvloop(Deb, State = #v1{pending_recv = true}) -> + mainloop(Deb, State); +recvloop(Deb, State = #v1{connection_state = blocked}) -> + mainloop(Deb, State); +recvloop(Deb, State = #v1{sock = Sock, recv_length = Length, buf = Buf}) -> + case iolist_size(Buf) < Length of + true -> ok = rabbit_net:setopts(Sock, [{active, once}]), + mainloop(Deb, State#v1{pending_recv = true}); + false -> {Data, Rest} = split_binary( + list_to_binary(lists:reverse(Buf)), Length), + recvloop(Deb, handle_input(State#v1.callback, Data, + State#v1{buf = [Rest]})) + end. + +mainloop(Deb, State = #v1{parent = Parent, sock = Sock}) -> receive - {inet_async, Sock, Ref, {ok, Data}} -> - mainloop(Deb, handle_input(State#v1.callback, Data, - State#v1{recv_ref = none})); - {inet_async, Sock, Ref, {error, closed}} -> + {tcp, Sock, Data} -> + recvloop(Deb, State#v1{buf = [Data | State#v1.buf], + pending_recv = false}); + {tcp_closed, Sock} -> if State#v1.connection_state =:= closed -> State; true -> throw(connection_closed_abruptly) end; - {inet_async, Sock, Ref, {error, Reason}} -> - throw({inet_error, Reason}); {conserve_memory, Conserve} -> - mainloop(Deb, internal_conserve_memory(Conserve, State)); + recvloop(Deb, internal_conserve_memory(Conserve, State)); {'EXIT', Parent, Reason} -> terminate(io_lib:format("broker forced connection closure " "with reason '~w'", [Reason]), State), @@ -391,11 +404,9 @@ mainloop(Deb, State = #v1{parent = Parent, sock= Sock, recv_ref = Ref}) -> switch_callback(State = #v1{connection_state = blocked, heartbeater = Heartbeater}, Callback, Length) -> ok = rabbit_heartbeat:pause_monitor(Heartbeater), - State#v1{callback = Callback, recv_length = Length, recv_ref = none}; + State#v1{callback = Callback, recv_length = Length}; switch_callback(State, Callback, Length) -> - Ref = inet_op(fun () -> rabbit_net:async_recv( - State#v1.sock, Length, infinity) end), - State#v1{callback = Callback, recv_length = Length, recv_ref = Ref}. + State#v1{callback = Callback, recv_length = Length}. terminate(Explanation, State) when ?IS_RUNNING(State) -> {normal, send_exception(State, 0, @@ -409,12 +420,9 @@ internal_conserve_memory(true, State = #v1{connection_state = running}) -> internal_conserve_memory(false, State = #v1{connection_state = blocking}) -> State#v1{connection_state = running}; internal_conserve_memory(false, State = #v1{connection_state = blocked, - heartbeater = Heartbeater, - callback = Callback, - recv_length = Length, - recv_ref = none}) -> + heartbeater = Heartbeater}) -> ok = rabbit_heartbeat:resume_monitor(Heartbeater), - switch_callback(State#v1{connection_state = running}, Callback, Length); + State#v1{connection_state = running}; internal_conserve_memory(_Conserve, State) -> State. -- cgit v1.2.1 From 930102bb5d272318c64d28860b3ff0d7435aa79b Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Wed, 8 Dec 2010 10:01:04 +0000 Subject: Add 'return' stats --- src/rabbit_channel.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c8ad00a..ada63ca2 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1219,10 +1219,16 @@ is_message_persistent(Content) -> IsPersistent end. 
-process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> +process_routing_result(unroutable, _, MsgSeqNo, + Message = #basic_message{exchange_name = ExchangeName}, + State) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), ok = basic_return(Message, State#ch.writer_pid, no_route), send_or_enqueue_ack(MsgSeqNo, undefined, State); -process_routing_result(not_delivered, _, MsgSeqNo, Message, State) -> +process_routing_result(not_delivered, _, MsgSeqNo, + Message = #basic_message{exchange_name = ExchangeName}, + State) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), ok = basic_return(Message, State#ch.writer_pid, no_consumers), send_or_enqueue_ack(MsgSeqNo, undefined, State); process_routing_result(routed, [], MsgSeqNo, _, State) -> -- cgit v1.2.1 From 98c743c563055ded2d8dc34182b05795c295c17c Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Wed, 8 Dec 2010 13:09:26 +0000 Subject: moved maybe_incr_stats to basic_return --- src/rabbit_channel.erl | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ada63ca2..50677fc6 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1070,11 +1070,12 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, basic_return(#basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}, - WriterPid, Reason) -> + State, Reason) -> + maybe_incr_stats([{ExchangeName, 1}], return, State), {_Close, ReplyCode, ReplyText} = rabbit_framing_amqp_0_9_1:lookup_amqp_exception(Reason), ok = rabbit_writer:send_command( - WriterPid, + State#ch.writer_pid, #'basic.return'{reply_code = ReplyCode, reply_text = ReplyText, exchange = ExchangeName#resource.name, @@ -1219,17 +1220,11 @@ is_message_persistent(Content) -> IsPersistent end. -process_routing_result(unroutable, _, MsgSeqNo, - Message = #basic_message{exchange_name = ExchangeName}, - State) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), - ok = basic_return(Message, State#ch.writer_pid, no_route), +process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> + ok = basic_return(Message, State, no_route), send_or_enqueue_ack(MsgSeqNo, undefined, State); -process_routing_result(not_delivered, _, MsgSeqNo, - Message = #basic_message{exchange_name = ExchangeName}, - State) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), - ok = basic_return(Message, State#ch.writer_pid, no_consumers), +process_routing_result(not_delivered, _, MsgSeqNo, Message, State) -> + ok = basic_return(Message, State, no_consumers), send_or_enqueue_ack(MsgSeqNo, undefined, State); process_routing_result(routed, [], MsgSeqNo, _, State) -> send_or_enqueue_ack(MsgSeqNo, undefined, State); -- cgit v1.2.1 From 6d72c379a0a1e5e2c258b411e6c905e52153c2ba Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 8 Dec 2010 15:57:42 +0000 Subject: Provide a means for plugins to add extra tables which are looked after by rabbit_mnesia --- src/rabbit_mnesia.erl | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a62e7a6f..65404aae 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -34,7 +34,8 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, - is_clustered/0, empty_ram_only_tables/0, copy_db/1]). + is_clustered/0, empty_ram_only_tables/0, copy_db/1, + add_table_definition/1]). 
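%% A hedged usage sketch (not part of this patch): a plugin could
%% register an extra table at boot with a definition shaped like the
%% entries of table_definitions/0 below, for example
%%
%%   ok = rabbit_mnesia:add_table_definition(
%%          {my_plugin_table, [{record_name, my_plugin_record},
%%                             {attributes, [key, value]}]}),
%%
%% where my_plugin_table, my_plugin_record and its fields are
%% hypothetical names.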
-export([table_names/0]). @@ -210,7 +211,18 @@ table_definitions() -> {rabbit_queue, [{record_name, amqqueue}, {attributes, record_info(fields, amqqueue)}, - {match, #amqqueue{name = queue_name_match(), _='_'}}]}]. + {match, #amqqueue{name = queue_name_match(), _='_'}}]}] + ++ plugin_table_definitions(). + +add_table_definition(Def) -> + ok = application:set_env(rabbit, plugin_mnesia_tables, + [Def | plugin_table_definitions()], infinity). + +plugin_table_definitions() -> + case application:get_env(rabbit, plugin_mnesia_tables) of + {ok, Defs} -> Defs; + undefined -> [] + end. binding_match() -> #binding{source = exchange_name_match(), -- cgit v1.2.1 From 95898ec0f59c752ab73833b3444be5f6b2215371 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 8 Dec 2010 16:08:57 +0000 Subject: Add a TODO --- src/rabbit_mnesia.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 65404aae..bdc4d76c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -214,6 +214,8 @@ table_definitions() -> {match, #amqqueue{name = queue_name_match(), _='_'}}]}] ++ plugin_table_definitions(). +%% TODO: re-work this abuse of the application env as a register with +%% the generic registry that should be landing at some point. add_table_definition(Def) -> ok = application:set_env(rabbit, plugin_mnesia_tables, [Def | plugin_table_definitions()], infinity). -- cgit v1.2.1 From c87841495733b9bdfde4ee8d8528f0979045b04a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 8 Dec 2010 16:26:54 +0000 Subject: On other node death, don't delete queues which still have some extra pids. --- src/rabbit_amqqueue.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e322e844..f525ffd1 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -492,9 +492,11 @@ on_node_down(Node) -> rabbit_binding:new_deletions(), rabbit_misc:execute_mnesia_transaction( fun () -> qlc:e(qlc:q([delete_queue(QueueName) || - #amqqueue{name = QueueName, pid = Pid} + #amqqueue{name = QueueName, pid = Pid, + extra_pids = EPids} <- mnesia:table(rabbit_queue), - node(Pid) == Node])) + node(Pid) == Node, + [] =:= EPids])) end))). delete_queue(QueueName) -> -- cgit v1.2.1 From 63f41f09a6d700d4e1714a96a074fb9f71091deb Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Wed, 8 Dec 2010 17:13:14 -0800 Subject: Clients can now override frame_max in RabbitMQ server. --- src/rabbit_reader.erl | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 4dd150a2..b6df9c98 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -50,7 +50,6 @@ -define(CLOSING_TIMEOUT, 1). -define(CHANNEL_TERMINATION_TIMEOUT, 3). -define(SILENT_CLOSE_DELAY, 3). --define(FRAME_MAX, 131072). 
%% set to zero once QPid fix their negotiation %--------------------------------------------------------------------------- @@ -739,7 +738,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, sock = Sock}) -> User = rabbit_access_control:check_login(Mechanism, Response), Tune = #'connection.tune'{channel_max = 0, - frame_max = ?FRAME_MAX, + frame_max = my_frame_max(), heartbeat = 0}, ok = send_on_channel0(Sock, Tune, Protocol), State#v1{connection_state = tuning, @@ -752,14 +751,15 @@ handle_method0(#'connection.tune_ok'{frame_max = FrameMax, connection = Connection, sock = Sock, start_heartbeat_fun = SHF}) -> - if (FrameMax /= 0) and (FrameMax < ?FRAME_MIN_SIZE) -> + MyFrameMax = my_frame_max(), + if FrameMax /= 0 andalso FrameMax < ?FRAME_MIN_SIZE -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w < ~w min size", [FrameMax, ?FRAME_MIN_SIZE]); - (?FRAME_MAX /= 0) and (FrameMax > ?FRAME_MAX) -> + MyFrameMax /= 0 andalso FrameMax > MyFrameMax -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w > ~w max size", - [FrameMax, ?FRAME_MAX]); + [FrameMax, MyFrameMax]); true -> SendFun = fun() -> @@ -824,6 +824,14 @@ handle_method0(_Method, #v1{connection_state = S}) -> rabbit_misc:protocol_error( channel_error, "unexpected method in connection state ~w", [S]). +%% Compute frame_max for this instance. Could use 0, but breaks QPid Java +%% client. Default is 131072, but user can override in rabbitmq.config. +my_frame_max() -> + case application:get_env(rabbit, frame_max) of + {ok, FM} -> FM; + _ -> 131072 + end. + send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -- cgit v1.2.1 From 08bf37780a75601c8dd58d134a29215c4c5bc030 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 12:42:57 +0000 Subject: Allow gen_server2s to change their callback modules dynamically. That was easy. --- src/gen_server2.erl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 6e02b23e..a54df3b8 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -880,6 +880,10 @@ handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name, loop(GS2State #gs2_state { state = NState, time = Time1, debug = Debug1 }); + {become, Mod, NState} -> + loop(find_prioritisers( + GS2State #gs2_state { mod = Mod, + state = NState })); _ -> handle_common_termination(Reply, Msg, GS2State) end. -- cgit v1.2.1 From ca6ef9622a8268726504d348c85b6261d1a50d31 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 12:48:26 +0000 Subject: Increase the probability that the above works --- src/gen_server2.erl | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index a54df3b8..d6b09bab 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -881,9 +881,21 @@ handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name, time = Time1, debug = Debug1 }); {become, Mod, NState} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {become, Mod, NState}), + loop(find_prioritisers( + GS2State #gs2_state { mod = Mod, + state = NState, + time = infinity, + debug = Debug1 })); + {become, Mod, NState, Time1} -> + Debug1 = common_debug(Debug, fun print_event/3, Name, + {become, Mod, NState}), loop(find_prioritisers( GS2State #gs2_state { mod = Mod, - state = NState })); + state = NState, + time = Time1, + debug = Debug1 })); _ -> handle_common_termination(Reply, Msg, GS2State) end. 
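%% A hedged usage sketch, not part of these patches: with the new return
%% values a callback module can hand a running gen_server2 over to
%% another module, for instance from a handle_cast/2 clause. The module
%% and helper names here are hypothetical.
%%
%%   handle_cast({become, NewMod}, State) ->
%%       %% convert_state/1 is an assumed helper producing whatever
%%       %% state NewMod's callbacks expect.
%%       {become, NewMod, convert_state(State)};
%%   handle_cast(_Msg, State) ->
%%       {noreply, State}.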
-- cgit v1.2.1 From 263b80a556fabe6927f85e793a207ab49adce887 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 16:41:20 +0000 Subject: Permit queue processes to be initialised with existing state. This raises the need for the ability to change the callbacks in rabbit_memory_monitor and fhc --- src/rabbit_amqqueue_process.erl | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index cecc85d0..027a82e7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -48,6 +48,8 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). +-export([init_with_backing_queue_state/3]). + -import(queue). -import(erlang). -import(lists). @@ -133,6 +135,28 @@ init(Q) -> guid_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. +init_with_backing_queue_state(Q, BQ, BQS) -> + ?LOGDEBUG("Queue starting - ~p~n", [Q]), + process_flag(trap_exit, true), + ok = file_handle_cache:register_callback( + rabbit_amqqueue, set_maximum_since_use, [self()]), + ok = rabbit_memory_monitor:register( + self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), + process_args(#q{q = Q#amqqueue{pid = self()}, + exclusive_consumer = none, + has_had_consumers = false, + backing_queue = BQ, + backing_queue_state = BQS, + active_consumers = queue:new(), + blocked_consumers = queue:new(), + expires = undefined, + sync_timer_ref = undefined, + rate_timer_ref = undefined, + expiry_timer_ref = undefined, + ttl = undefined, + stats_timer = rabbit_event:init_stats_timer(), + guid_to_channel = dict:new()}). + terminate(shutdown, State = #q{backing_queue = BQ}) -> terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); terminate({shutdown, _}, State = #q{backing_queue = BQ}) -> -- cgit v1.2.1 From fc23d5651234bd999dd0388b1f8d7801908b817b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 17:01:06 +0000 Subject: Turns out the memory_monitor and fhc callback registration is idempotent, and it's also just simply easier to correctly fake the amqqueue_process API. Need to then deal with rate timers and pre hibernation callbacks. 
Groan --- src/rabbit_amqqueue_process.erl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 027a82e7..49856431 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -138,10 +138,6 @@ init(Q) -> init_with_backing_queue_state(Q, BQ, BQS) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), - ok = file_handle_cache:register_callback( - rabbit_amqqueue, set_maximum_since_use, [self()]), - ok = rabbit_memory_monitor:register( - self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), process_args(#q{q = Q#amqqueue{pid = self()}, exclusive_consumer = none, has_had_consumers = false, -- cgit v1.2.1 From 322295618ae9593e10ae9fb8ee4eace840ef8a5e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 17:43:46 +0000 Subject: Predictions of parameter explosions are high --- src/rabbit_amqqueue_process.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 49856431..468a6ee3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -48,7 +48,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). --export([init_with_backing_queue_state/3]). +-export([init_with_backing_queue_state/4]). -import(queue). -import(erlang). @@ -135,7 +135,7 @@ init(Q) -> guid_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -init_with_backing_queue_state(Q, BQ, BQS) -> +init_with_backing_queue_state(Q, BQ, BQS, RateTRef) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), process_args(#q{q = Q#amqqueue{pid = self()}, @@ -147,7 +147,7 @@ init_with_backing_queue_state(Q, BQ, BQS) -> blocked_consumers = queue:new(), expires = undefined, sync_timer_ref = undefined, - rate_timer_ref = undefined, + rate_timer_ref = RateTRef, expiry_timer_ref = undefined, ttl = undefined, stats_timer = rabbit_event:init_stats_timer(), -- cgit v1.2.1 From 471480808bf862511f2277b7f2eb815db52c7342 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Dec 2010 17:55:51 +0000 Subject: Contract will be process is already trapping exits --- src/rabbit_amqqueue_process.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 468a6ee3..d781cd35 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -137,7 +137,6 @@ init(Q) -> init_with_backing_queue_state(Q, BQ, BQS, RateTRef) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), - process_flag(trap_exit, true), process_args(#q{q = Q#amqqueue{pid = self()}, exclusive_consumer = none, has_had_consumers = false, -- cgit v1.2.1 From b1bebd2cf15126d813995350334f7562fe3dcd84 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 12 Dec 2010 23:53:06 +0000 Subject: Alter the result of BQ:requeue so that it also returns the guids of the requeued messages. This makes it match the spec of ack closely and is more logical. Also, entirely coincidentally, happens to be necessary for HA... 
--- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_amqqueue_process.erl | 4 +++- src/rabbit_tests.erl | 3 ++- src/rabbit_variable_queue.erl | 4 ++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 0f831a7d..a330fe1e 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -68,7 +68,7 @@ (rabbit_types:txn(), fun (() -> any()), message_properties_transformer(), state()) -> {[ack()], state()}). -spec(requeue/3 :: ([ack()], message_properties_transformer(), state()) - -> state()). + -> {[rabbit_guid:guid()], state()}). -spec(len/1 :: (state()) -> non_neg_integer()). -spec(is_empty/1 :: (state()) -> boolean()). -spec(set_ram_duration_target/2 :: diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index c1972c26..1c4a3716 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -544,7 +544,9 @@ deliver_or_enqueue(Delivery, State) -> requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> maybe_run_queue_via_backing_queue( fun (BQS) -> - BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) + {_Guids, BQS1} = + BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS), + BQS1 end, State). fetch(AckRequired, State = #q{backing_queue_state = BQS, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 3343bb99..7f6c5d3d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2047,7 +2047,8 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> VQ1 = rabbit_variable_queue:set_ram_duration_target(0, VQ0), VQ2 = variable_queue_publish(false, 4, VQ1), {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), - VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), + {_Guids, VQ4} = + rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:idle_timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), VQ7 = rabbit_variable_queue:init(test_amqqueue(true), true, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d1da2c89..acbbe458 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -716,7 +716,7 @@ tx_commit(Txn, Fun, MsgPropsFun, end)}. requeue(AckTags, MsgPropsFun, State) -> - {_Guids, State1} = + {Guids, State1} = ack(fun msg_store_release/3, fun (#msg_status { msg = Msg, msg_props = MsgProps }, State1) -> {_SeqId, State2} = publish(Msg, MsgPropsFun(MsgProps), @@ -732,7 +732,7 @@ requeue(AckTags, MsgPropsFun, State) -> State3 end, AckTags, State), - a(reduce_memory_use(State1)). + {Guids, a(reduce_memory_use(State1))}. len(#vqstate { len = Len }) -> Len. -- cgit v1.2.1 From d8c97cf013c19cb19ba0d3235b4b030f4ed2690b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 13 Dec 2010 13:58:38 +0000 Subject: Extend state conversion function --- src/rabbit_amqqueue_process.erl | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 1c4a3716..a7468936 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -48,7 +48,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). --export([init_with_backing_queue_state/4]). +-export([init_with_backing_queue_state/6]). 
% Queue's state -record(q, {q, @@ -131,22 +131,30 @@ init(Q) -> guid_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -init_with_backing_queue_state(Q, BQ, BQS, RateTRef) -> +init_with_backing_queue_state(Q, BQ, BQS, RateTRef, AckTags, Deliveries) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), - process_args(#q{q = Q#amqqueue{pid = self()}, - exclusive_consumer = none, - has_had_consumers = false, - backing_queue = BQ, - backing_queue_state = BQS, - active_consumers = queue:new(), - blocked_consumers = queue:new(), - expires = undefined, - sync_timer_ref = undefined, - rate_timer_ref = RateTRef, - expiry_timer_ref = undefined, - ttl = undefined, - stats_timer = rabbit_event:init_stats_timer(), - guid_to_channel = dict:new()}). + State = requeue_and_run( + AckTags, + process_args( + #q{q = Q#amqqueue{pid = self()}, + exclusive_consumer = none, + has_had_consumers = false, + backing_queue = BQ, + backing_queue_state = BQS, + active_consumers = queue:new(), + blocked_consumers = queue:new(), + expires = undefined, + sync_timer_ref = undefined, + rate_timer_ref = RateTRef, + expiry_timer_ref = undefined, + ttl = undefined, + stats_timer = rabbit_event:init_stats_timer(), + guid_to_channel = dict:new()})), + lists:foldl( + fun (Delivery, StateN) -> + {_Delivered, StateN1} = deliver_or_enqueue(Delivery, StateN), + StateN1 + end, State, Deliveries). terminate(shutdown, State = #q{backing_queue = BQ}) -> terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); -- cgit v1.2.1 From fbd954e2f89208dd033baf80d9dc8cd07bc5285c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 14 Dec 2010 17:52:13 +0000 Subject: inline --- src/rabbit_amqqueue.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 1e83265f..e4bc9f76 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -488,10 +488,9 @@ on_node_down(Node) -> rabbit_misc:execute_mnesia_transaction( fun () -> qlc:e(qlc:q([delete_queue(QueueName) || #amqqueue{name = QueueName, pid = Pid, - extra_pids = EPids} + extra_pids = []} <- mnesia:table(rabbit_queue), - node(Pid) == Node, - [] =:= EPids])) + node(Pid) == Node])) end))). delete_queue(QueueName) -> -- cgit v1.2.1 From d6d7b3bee95eee4137b041a3346b22988272b99e Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Tue, 14 Dec 2010 16:16:01 -0800 Subject: Put in Matthias's changes. 
--- ebin/rabbit_app.in | 1 + src/rabbit_reader.erl | 20 +++++++++----------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 6c33ef8b..d3808a54 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -19,6 +19,7 @@ {vm_memory_high_watermark, 0.4}, {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, + {frame_max, 131072}, {persister_max_wrap_entries, 500}, {persister_hibernate_after, 10000}, {msg_store_file_size_limit, 16777216}, diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index b6df9c98..cdb3586a 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -738,7 +738,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, sock = Sock}) -> User = rabbit_access_control:check_login(Mechanism, Response), Tune = #'connection.tune'{channel_max = 0, - frame_max = my_frame_max(), + frame_max = server_frame_max(), heartbeat = 0}, ok = send_on_channel0(Sock, Tune, Protocol), State#v1{connection_state = tuning, @@ -751,15 +751,15 @@ handle_method0(#'connection.tune_ok'{frame_max = FrameMax, connection = Connection, sock = Sock, start_heartbeat_fun = SHF}) -> - MyFrameMax = my_frame_max(), + ServerFrameMax = server_frame_max(), if FrameMax /= 0 andalso FrameMax < ?FRAME_MIN_SIZE -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w < ~w min size", [FrameMax, ?FRAME_MIN_SIZE]); - MyFrameMax /= 0 andalso FrameMax > MyFrameMax -> + ServerFrameMax /= 0 andalso FrameMax > ServerFrameMax -> rabbit_misc:protocol_error( not_allowed, "frame_max=~w > ~w max size", - [FrameMax, MyFrameMax]); + [FrameMax, ServerFrameMax]); true -> SendFun = fun() -> @@ -824,13 +824,11 @@ handle_method0(_Method, #v1{connection_state = S}) -> rabbit_misc:protocol_error( channel_error, "unexpected method in connection state ~w", [S]). -%% Compute frame_max for this instance. Could use 0, but breaks QPid Java -%% client. Default is 131072, but user can override in rabbitmq.config. -my_frame_max() -> - case application:get_env(rabbit, frame_max) of - {ok, FM} -> FM; - _ -> 131072 - end. +%% Compute frame_max for this instance. Could simply use 0, but breaks +%% QPid Java client. +server_frame_max() -> + {ok, FrameMax} = application:get_env(rabbit, frame_max), + FrameMax. send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). 
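Since the limit now comes from the rabbit application environment (with the 131072 default shipped in rabbit_app.in above), an operator can override it without recompiling. A minimal sketch of such an override in rabbitmq.config, with 65536 chosen purely as an example value:

    %% rabbitmq.config - plain Erlang terms read at broker start-up
    [
     {rabbit, [
               %% Offered to clients in connection.tune and used as the
               %% upper bound when validating connection.tune_ok; 0 would
               %% remove the upper bound but, per the comment above,
               %% breaks the QPid Java client.
               {frame_max, 65536}
              ]}
    ].

With this in place, clients may negotiate any non-zero frame size between ?FRAME_MIN_SIZE and the configured value.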
-- cgit v1.2.1 From c9a9f9d93ce6e66ff1c02ad5d03636307d9ecb68 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 16 Dec 2010 15:00:36 +0000 Subject: Move the various files from rabbit-ha to rabbit-server as the ha work can't exist as a plugin --- include/gm_specs.hrl | 28 + src/gm.erl | 1312 +++++++++++++++++++++++++++++++ src/gm_test.erl | 120 +++ src/rabbit_mirror_queue_coordinator.erl | 125 +++ src/rabbit_mirror_queue_master.erl | 221 ++++++ src/rabbit_mirror_queue_misc.erl | 42 + src/rabbit_mirror_queue_slave.erl | 481 +++++++++++ src/rabbit_mirror_queue_slave_sup.erl | 54 ++ 8 files changed, 2383 insertions(+) create mode 100644 include/gm_specs.hrl create mode 100644 src/gm.erl create mode 100644 src/gm_test.erl create mode 100644 src/rabbit_mirror_queue_coordinator.erl create mode 100644 src/rabbit_mirror_queue_master.erl create mode 100644 src/rabbit_mirror_queue_misc.erl create mode 100644 src/rabbit_mirror_queue_slave.erl create mode 100644 src/rabbit_mirror_queue_slave_sup.erl diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl new file mode 100644 index 00000000..7f607755 --- /dev/null +++ b/include/gm_specs.hrl @@ -0,0 +1,28 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-type(callback_result() :: 'ok' | {'stop', any()}). +-type(args() :: [any()]). +-type(members() :: [pid()]). + +-spec(joined/2 :: (args(), members()) -> callback_result()). +-spec(members_changed/3 :: (args(), members(), members()) -> callback_result()). +-spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()). +-spec(terminate/2 :: (args(), term()) -> any()). + +-endif. diff --git a/src/gm.erl b/src/gm.erl new file mode 100644 index 00000000..47971bd4 --- /dev/null +++ b/src/gm.erl @@ -0,0 +1,1312 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm). + +%% Guaranteed Multicast +%% ==================== +%% +%% This module provides the ability to create named groups of +%% processes to which members can be dynamically added and removed, +%% and for messages to be broadcast within the group that are +%% guaranteed to reach all members of the group during the lifetime of +%% the message. 
The lifetime of a message is defined as being, at a +%% minimum, the time from which the message is first sent to any +%% member of the group, up until the time at which it is known by the +%% member who published the message that the message has reached all +%% group members. +%% +%% The guarantee given is that provided a message, once sent, makes it +%% to members who do not all leave the group, the message will +%% continue to propagate to all group members. +%% +%% Another way of stating the guarantee is that if member P publishes +%% messages m and m', then for all members P', if P' is a member of +%% the group prior to the publication of m, and P' receives m', then +%% P' will receive m. +%% +%% Note that only local-ordering is enforced: i.e. if member P sends +%% message m and then message m', then for-all members P', if P' +%% receives m and m', then they will receive m' after m. Causality +%% ordering is _not_ enforced. I.e. if member P receives message m +%% and as a result publishes message m', there is no guarantee that +%% other members P' will receive m before m'. +%% +%% +%% API Use +%% ------- +%% +%% Mnesia must be started. Use the idempotent create_tables/0 function +%% to create the tables required. +%% +%% start_link/3 +%% Provide the group name, the callback module name, and a list of any +%% arguments you wish to be passed into the callback module's +%% functions. The joined/1 will be called when we have joined the +%% group, and the list of arguments will have appended to it a list of +%% the current members of the group. See the comments in +%% behaviour_info/1 below for further details of the callback +%% functions. +%% +%% leave/1 +%% Provide the Pid. Removes the Pid from the group. The callback +%% terminate/1 function will be called. +%% +%% broadcast/2 +%% Provide the Pid and a Message. The message will be sent to all +%% members of the group as per the guarantees given above. This is a +%% cast and the function call will return immediately. There is no +%% guarantee that the message will reach any member of the group. +%% +%% confirmed_broadcast/2 +%% Provide the Pid and a Message. As per broadcast/2 except that this +%% is a call, not a cast, and only returns 'ok' once the Message has +%% reached every member of the group. Do not call +%% confirmed_broadcast/2 directly from the callback module otherwise +%% you will deadlock the entire group. +%% +%% group_members/1 +%% Provide the Pid. Returns a list of the current group members. +%% +%% +%% Implementation Overview +%% ----------------------- +%% +%% One possible means of implementation would be a fan-out from the +%% sender to every member of the group. This would require that the +%% group is fully connected, and, in the event that the original +%% sender of the message disappears from the group before the message +%% has made it to every member of the group, raises questions as to +%% who is responsible for sending on the message to new group members. +%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] - +%% if the sender dies part way through, who is responsible for +%% ensuring that the remaining Members receive the Msg? In the event +%% that within the group, messages sent are broadcast from a subset of +%% the members, the fan-out arrangement has the potential to +%% substantially impact the CPU and network workload of such members, +%% as such members would have to accommodate the cost of sending each +%% message to every group member. 
+%% +%% Instead, if the members of the group are arranged in a chain, then +%% it becomes easier to reason about who within the group has received +%% each message and who has not. It eases issues of responsibility: in +%% the event of a group member disappearing, the nearest upstream +%% member of the chain is responsible for ensuring that messages +%% continue to propagate down the chain. It also results in equal +%% distribution of sending and receiving workload, even if all +%% messages are being sent from just a single group member. This +%% configuration has the further advantage that it is not necessary +%% for every group member to know of every other group member, and +%% even that a group member does not have to be accessible from all +%% other group members. +%% +%% Performance is kept high by permitting pipelining and all +%% communication between joined group members is asynchronous. In the +%% chain A -> B -> C -> D, if A sends a message to the group, it will +%% not directly contact C or D. However, it must know that D receives +%% the message (in addition to B and C) before it can consider the +%% message fully sent. A simplistic implementation would require that +%% D replies to C, C replies to B and B then replies to A. This would +%% result in a propagation delay of twice the length of the chain. It +%% would also require, in the event of the failure of C, that D knows +%% to directly contact B and issue the necessary replies. Instead, the +%% chain forms a ring: D sends the message on to A: D does not +%% distinguish A as the sender, merely as the next member (downstream) +%% within the chain (which has now become a ring). When A receives +%% from D messages that A sent, it knows that all members have +%% received the message. However, the message is not dead yet: if C +%% died as B was sending to C, then B would need to detect the death +%% of C and forward the message on to D instead: thus every node has +%% to remember every message published until it is told that it can +%% forget about the message. This is essential not just for dealing +%% with failure of members, but also for the addition of new members. +%% +%% Thus once A receives the message back again, it then sends to B an +%% acknowledgement for the message, indicating that B can now forget +%% about the message. B does so, and forwards the ack to C. C forgets +%% the message, and forwards the ack to D, which forgets the message +%% and finally forwards the ack back to A. At this point, A takes no +%% further action: the message and its acknowledgement have made it to +%% every member of the group. The message is now dead, and any new +%% member joining the group at this point will not receive the +%% message. +%% +%% We therefore have two roles: +%% +%% 1. The sender, who upon receiving their own messages back, must +%% then send out acknowledgements, and upon receiving their own +%% acknowledgements back perform no further action. +%% +%% 2. The other group members who upon receiving messages and +%% acknowledgements must update their own internal state accordingly +%% (the sending member must also do this in order to be able to +%% accommodate failures), and forwards messages on to their downstream +%% neighbours. +%% +%% +%% Implementation: It gets trickier +%% -------------------------------- +%% +%% Chain A -> B -> C -> D +%% +%% A publishes a message which B receives. A now dies. B and D will +%% detect the death of A, and will link up, thus the chain is now B -> +%% C -> D. 
B forwards A's message on to C, who forwards it to D, who +%% forwards it to B. Thus B is now responsible for A's messages - both +%% publications and acknowledgements that were in flight at the point +%% at which A died. Even worse is that this is transitive: after B +%% forwards A's message to C, B dies as well. Now C is not only +%% responsible for B's in-flight messages, but is also responsible for +%% A's in-flight messages. +%% +%% Lemma 1: A member can only determine which dead members they have +%% inherited responsibility for if there is a total ordering on the +%% conflicting additions and subtractions of members from the group. +%% +%% Consider the simultaneous death of B and addition of B' that +%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or +%% C is responsible for in-flight messages from B. It is easy to +%% ensure that at least one of them thinks they have inherited B, but +%% if we do not ensure that exactly one of them inherits B, then we +%% could have B' converting publishes to acks, which then will crash C +%% as C does not believe it has issued acks for those messages. +%% +%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E +%% becoming A -> C' -> E. Who has inherited which of B, C and D? +%% +%% However, for non-conflicting membership changes, only a partial +%% ordering is required. For example, A -> B -> C becoming A -> A' -> +%% B. The addition of A', between A and B can have no conflicts with +%% the death of C: it is clear that A has inherited C's messages. +%% +%% For ease of implementation, we adopt the simple solution, of +%% imposing a total order on all membership changes. +%% +%% On the death of a member, it is ensured the dead member's +%% neighbours become aware of the death, and the upstream neighbour +%% now sends to its new downstream neighbour its state, including the +%% messages pending acknowledgement. The downstream neighbour can then +%% use this to calculate which publishes and acknowledgements it has +%% missed out on, due to the death of its old upstream. Thus the +%% downstream can catch up, and continues the propagation of messages +%% through the group. +%% +%% Lemma 2: When a member is joining, it must synchronously +%% communicate with its upstream member in order to receive its +%% starting state atomically with its addition to the group. +%% +%% New members must start with the same state as their nearest +%% upstream neighbour. This ensures that it is not surprised by +%% acknowledgements they are sent, and that should their downstream +%% neighbour die, they are able to send the correct state to their new +%% downstream neighbour to ensure it can catch up. Thus in the +%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' -> +%% C, A' must start with the state of A, so that it can send C the +%% correct state when B dies, allowing C to detect any missed +%% messages. +%% +%% If A' starts by adding itself to the group membership, A could then +%% die, without A' having received the necessary state from A. This +%% would leave A' responsible for in-flight messages from A, but +%% having the least knowledge of all, of those messages. Thus A' must +%% start by synchronously calling A, which then immediately sends A' +%% back its state. A then adds A' to the group. 
If A dies at this +%% point then A' will be able to see this (as A' will fail to appear +%% in the group membership), and thus A' will ignore the state it +%% receives from A, and will simply repeat the process, trying to now +%% join downstream from some other member. This ensures that should +%% the upstream die as soon as the new member has been joined, the new +%% member is guaranteed to receive the correct state, allowing it to +%% correctly process messages inherited due to the death of its +%% upstream neighbour. +%% +%% The canonical definition of the group membership is held by a +%% distributed database. Whilst this allows the total ordering of +%% changes to be achieved, it is nevertheless undesirable to have to +%% query this database for the current view, upon receiving each +%% message. Instead, we wish for members to be able to cache a view of +%% the group membership, which then requires a cache invalidation +%% mechanism. Each member maintains its own view of the group +%% membership. Thus when the group's membership changes, members may +%% need to become aware of such changes in order to be able to +%% accurately process messages they receive. Because of the +%% requirement of a total ordering of conflicting membership changes, +%% it is not possible to use the guaranteed broadcast mechanism to +%% communicate these changes: to achieve the necessary ordering, it +%% would be necessary for such messages to be published by exactly one +%% member, which can not be guaranteed given that such a member could +%% die. +%% +%% The total ordering we enforce on membership changes gives rise to a +%% view version number: every change to the membership creates a +%% different view, and the total ordering permits a simple +%% monotonically increasing view version number. +%% +%% Lemma 3: If a message is sent from a member that holds view version +%% N, it can be correctly processed by any member receiving the +%% message with a view version >= N. +%% +%% Initially, let us suppose that each view contains the ordering of +%% every member that was ever part of the group. Dead members are +%% marked as such. Thus we have a ring of members, some of which are +%% dead, and are thus inherited by the nearest alive downstream +%% member. +%% +%% In the chain A -> B -> C, all three members initially have view +%% version 1, which reflects reality. B publishes a message, which is +%% forward by C to A. B now dies, which A notices very quickly. Thus A +%% updates the view, creating version 2. It now forwards B's +%% publication, sending that message to its new downstream neighbour, +%% C. This happens before C is aware of the death of B. C must become +%% aware of the view change before it interprets the message its +%% received, otherwise it will fail to learn of the death of B, and +%% thus will not realise it has inherited B's messages (and will +%% likely crash). +%% +%% Thus very simply, we have that each subsequent view contains more +%% information than the preceding view. +%% +%% However, to avoid the views growing indefinitely, we need to be +%% able to delete members which have died _and_ for which no messages +%% are in-flight. 
This requires that upon inheriting a dead member, we +%% know the last publication sent by the dead member (this is easy: we +%% inherit a member because we are the nearest downstream member which +%% implies that we know at least as much than everyone else about the +%% publications of the dead member), and we know the earliest message +%% for which the acknowledgement is still in flight. +%% +%% In the chain A -> B -> C, when B dies, A will send to C its state +%% (as C is the new downstream from A), allowing C to calculate which +%% messages it has missed out on (described above). At this point, C +%% also inherits B's messages. If that state from A also includes the +%% last message published by B for which an acknowledgement has been +%% seen, then C knows exactly which further acknowledgements it must +%% receive (also including issuing acknowledgements for publications +%% still in-flight that it receives), after which it is known there +%% are no more messages in flight for B, thus all evidence that B was +%% ever part of the group can be safely removed from the canonical +%% group membership. +%% +%% Thus, for every message that a member sends, it includes with that +%% message its view version. When a member receives a message it will +%% update its view from the canonical copy, should its view be older +%% than the view version included in the message it has received. +%% +%% The state held by each member therefore includes the messages from +%% each publisher pending acknowledgement, the last publication seen +%% from that publisher, and the last acknowledgement from that +%% publisher. In the case of the member's own publications or +%% inherited members, this last acknowledgement seen state indicates +%% the last acknowledgement retired, rather than sent. +%% +%% +%% Proof sketch +%% ------------ +%% +%% We need to prove that with the provided operational semantics, we +%% can never reach a state that is not well formed from a well-formed +%% starting state. +%% +%% Operational semantics (small step): straight-forward message +%% sending, process monitoring, state updates. +%% +%% Well formed state: dead members inherited by exactly one non-dead +%% member; for every entry in anyone's pending-acks, either (the +%% publication of the message is in-flight downstream from the member +%% and upstream from the publisher) or (the acknowledgement of the +%% message is in-flight downstream from the publisher and upstream +%% from the member). +%% +%% Proof by induction on the applicable operational semantics. +%% +%% +%% Related work +%% ------------ +%% +%% The ring configuration and double traversal of messages around the +%% ring is similar (though developed independently) to the LCR +%% protocol by [Levy 2008]. However, LCR differs in several +%% ways. Firstly, by using vector clocks, it enforces a total order of +%% message delivery, which is unnecessary for our purposes. More +%% significantly, it is built on top of a "group communication system" +%% which performs the group management functions, taking +%% responsibility away from the protocol as to how to cope with safely +%% adding and removing members. When membership changes do occur, the +%% protocol stipulates that every member must perform communication +%% with every other member of the group, to ensure all outstanding +%% deliveries complete, before the entire group transitions to the new +%% view. This, in total, requires two sets of all-to-all synchronous +%% communications. 
+%% +%% This is not only rather inefficient, but also does not explain what +%% happens upon the failure of a member during this process. It does +%% though entirely avoid the need for inheritance of responsibility of +%% dead members that our protocol incorporates. +%% +%% In [Marandi et al 2010], a Paxos-based protocol is described. This +%% work explicitly focuses on the efficiency of communication. LCR +%% (and our protocol too) are more efficient, but at the cost of +%% higher latency. The Ring-Paxos protocol is itself built on top of +%% IP-multicast, which rules it out for many applications where +%% point-to-point communication is all that can be required. They also +%% have an excellent related work section which I really ought to +%% read... +%% +%% +%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008. +%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast +%% Protocol + + +-behaviour(gen_server2). + +-export([create_tables/0, start_link/3, leave/1, broadcast/2, + confirmed_broadcast/2, group_members/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([behaviour_info/1]). + +-export([add_to_rabbit_mnesia/0]). + +-define(GROUP_TABLE, gm_group). +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). +-define(SETS, ordsets). + +-record(state, + { self, + left, + right, + group_name, + module, + view, + pub_count, + members_state, + callback_args, + confirms + }). + +-record(gm_group, { name, version, members }). + +-record(view_member, { id, aliases, left, right }). + +-record(member, { pending_ack, last_pub, last_ack }). + +-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group}, + {attributes, record_info(fields, gm_group)}]}). +-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}). + +-rabbit_boot_step({gm_tables, + [{description, "add GM tables to rabbit_mnesia"}, + {mfa, {?MODULE, add_to_rabbit_mnesia, []}}, + {enables, database}]}). + +-define(TAG, '$gm'). + +-ifdef(use_specs). + +-export_type([group_name/0]). + +-type(group_name() :: any()). + +-spec(create_tables/0 :: () -> 'ok'). +-spec(start_link/3 :: (group_name(), atom(), [any()]) -> + {'ok', pid()} | {'error', any()}). +-spec(leave/1 :: (pid()) -> 'ok'). +-spec(broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(group_members/1 :: (pid()) -> [pid()]). + +-endif. + +behaviour_info(callbacks) -> + [ + %% Called when we've successfully joined the group. Supplied with + %% Args provided in start_link, plus current group members. + {joined, 2}, + + %% Supplied with Args provided in start_link, the list of new + %% members and the list of members previously known to us that + %% have since died. Note that if a member joins and dies very + %% quickly, it's possible that we will never see that member + %% appear in either births or deaths. However we are guaranteed + %% that (1) we will see a member joining either in the births + %% here, or in the members passed to joined/1 before receiving + %% any messages from it; and (2) we will not see members die that + %% we have not seen born (or supplied in the members to + %% joined/1). + {members_changed, 3}, + + %% Supplied with Args provided in start_link, the sender, and the + %% message. 
This does get called for messages injected by this + %% member, however, in such cases, there is no special + %% significance of this call: it does not indicate that the + %% message has made it to any other members, let alone all other + %% members. + {handle_msg, 3}, + + %% Called on gm member termination as per rules in gen_server, + %% with the Args provided in start_link plus the termination + %% Reason. + {terminate, 2} + ]; +behaviour_info(_Other) -> + undefined. + +create_tables() -> + create_tables([?TABLE]). + +create_tables([]) -> + ok; +create_tables([{Table, Attributes} | Tables]) -> + case mnesia:create_table(Table, Attributes) of + {atomic, ok} -> create_tables(Tables); + {aborted, {already_exists, gm_group}} -> create_tables(Tables); + Err -> Err + end. + +add_to_rabbit_mnesia() -> + {Name, Attributes} = ?TABLE, + ok = rabbit_mnesia:add_table_definition( + {Name, [?TABLE_MATCH | Attributes]}). + +start_link(GroupName, Module, Args) -> + gen_server2:start_link(?MODULE, [GroupName, Module, Args], []). + +leave(Server) -> + gen_server2:cast(Server, leave). + +broadcast(Server, Msg) -> + gen_server2:cast(Server, {broadcast, Msg}). + +confirmed_broadcast(Server, Msg) -> + gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity). + +group_members(Server) -> + gen_server2:call(Server, group_members, infinity). + + +init([GroupName, Module, Args]) -> + random:seed(now()), + gen_server2:cast(self(), join), + Self = self(), + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new() }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + + +handle_call({confirmed_broadcast, _Msg}, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call({confirmed_broadcast, Msg}, _From, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), ok, State}); + +handle_call({confirmed_broadcast, Msg}, From, State) -> + internal_broadcast(Msg, From, State); + +handle_call(group_members, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call(group_members, _From, State = #state { view = View }) -> + reply(alive_view_members(View), State); + +handle_call({add_on_right, _NewMember}, _From, + State = #state { members_state = undefined }) -> + reply(not_ready, State); + +handle_call({add_on_right, NewMember}, _From, + State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + Group = record_new_member_in_group( + GroupName, Self, NewMember, + fun (Group1) -> + View1 = group_to_view(Group1), + ok = send_right(NewMember, View1, + {catchup, Self, prepare_members_state( + MembersState)}) + end), + View2 = group_to_view(Group), + State1 = check_neighbours(State #state { view = View2 }), + Result = callback_view_changed(Args, Module, View, View2), + handle_callback_result({Result, {ok, Group}, State1}). 
+ + +handle_cast({?TAG, ReqVer, Msg}, + State = #state { view = View, + group_name = GroupName, + module = Module, + callback_args = Args }) -> + {Result, State1} = + case needs_view_update(ReqVer, View) of + true -> + View1 = group_to_view(read_group(GroupName)), + {callback_view_changed(Args, Module, View, View1), + check_neighbours(State #state { view = View1 })}; + false -> + {ok, State} + end, + handle_callback_result( + if_callback_success( + Result, + fun (_Result1, State2) -> handle_msg(Msg, State2) end, + fun (Result1, State2) -> {Result1, State2} end, + State1)); + +handle_cast({broadcast, _Msg}, State = #state { members_state = undefined }) -> + noreply(State); + +handle_cast({broadcast, Msg}, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), State}); + +handle_cast({broadcast, Msg}, State) -> + internal_broadcast(Msg, none, State); + +handle_cast(join, State = #state { self = Self, + group_name = GroupName, + members_state = undefined, + module = Module, + callback_args = Args }) -> + View = join_group(Self, GroupName), + MembersState = + case alive_view_members(View) of + [Self] -> blank_member_state(); + _ -> undefined + end, + State1 = check_neighbours(State #state { view = View, + members_state = MembersState }), + handle_callback_result( + {Module:joined(Args, all_known_members(View)), State1}); + +handle_cast(leave, State) -> + {stop, normal, State}. + + +handle_info({'DOWN', MRef, process, _Pid, _Reason}, + State = #state { self = Self, + left = Left, + right = Right, + group_name = GroupName, + confirms = Confirms }) -> + Member = case {Left, Right} of + {{Member1, MRef}, _} -> Member1; + {_, {Member1, MRef}} -> Member1; + _ -> undefined + end, + case Member of + undefined -> + noreply(State); + _ -> + View1 = + group_to_view(record_dead_member_in_group(Member, GroupName)), + State1 = State #state { view = View1 }, + {Result, State2} = + case alive_view_members(View1) of + [Self] -> + maybe_erase_aliases( + State1 #state { + members_state = blank_member_state(), + confirms = purge_confirms(Confirms) }); + _ -> + {ok, State1} + end, + handle_callback_result({Result, check_neighbours(State2)}) + end. + + +terminate(Reason, #state { module = Module, + callback_args = Args }) -> + Module:terminate(Args, Reason). + + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
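+
+%% For reference, a minimal callback module for this behaviour could
+%% look like the sketch below (the module name my_gm_callback and the
+%% blanket 'ok' returns are illustrative only, not part of this patch):
+%%
+%%   -module(my_gm_callback).
+%%   -behaviour(gm).
+%%   -export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
+%%
+%%   joined(_Args, _Members)                  -> ok.
+%%   members_changed(_Args, _Births, _Deaths) -> ok.
+%%   handle_msg(_Args, _From, _Msg)           -> ok.
+%%   terminate(_Args, _Reason)                -> ok.
+%%
+%% Each of the first three callbacks may instead return {stop, Reason},
+%% as per the callback_result() type in gm_specs.hrl; gm_test.erl below
+%% gives a fuller example.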
+ + +handle_msg(check_neighbours, State) -> + %% no-op - it's already been done by the calling handle_cast + {ok, State}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + right = {Right, _MRefR}, + view = View, + members_state = undefined }) -> + ok = send_right(Right, View, {catchup, Self, MembersStateLeft}), + MembersStateLeft1 = build_members_state(MembersStateLeft), + {ok, State #state { members_state = MembersStateLeft1 }}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + view = View, + members_state = MembersState }) + when MembersState =/= undefined -> + MembersStateLeft1 = build_members_state(MembersStateLeft), + AllMembers = lists:usort(dict:fetch_keys(MembersState) ++ + dict:fetch_keys(MembersStateLeft1)), + {MembersState1, Activity} = + lists:foldl( + fun (Id, MembersStateActivity) -> + #member { pending_ack = PALeft, last_ack = LA } = + find_member_or_blank(Id, MembersStateLeft1), + with_member_acc( + fun (#member { pending_ack = PA } = Member, Activity1) -> + case is_member_alias(Id, Self, View) of + true -> + {_AcksInFlight, Pubs, _PA1} = + find_prefix_common_suffix(PALeft, PA), + {Member #member { last_ack = LA }, + activity_cons(Id, pubs_from_queue(Pubs), + [], Activity1)}; + false -> + {Acks, _Common, Pubs} = + find_prefix_common_suffix(PA, PALeft), + {Member, + activity_cons(Id, pubs_from_queue(Pubs), + acks_from_queue(Acks), + Activity1)} + end + end, Id, MembersStateActivity) + end, {MembersState, activity_nil()}, AllMembers), + handle_msg({activity, Left, activity_finalise(Activity)}, + State #state { members_state = MembersState1 }); + +handle_msg({catchup, _NotLeft, _MembersState}, State) -> + {ok, State}; + +handle_msg({activity, Left, Activity}, + State = #state { self = Self, + left = {Left, _MRefL}, + module = Module, + view = View, + members_state = MembersState, + confirms = Confirms, + callback_args = Args }) + when MembersState =/= undefined -> + {MembersState1, {Confirms1, Activity1}} = + lists:foldl( + fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> + with_member_acc( + fun (Member = #member { pending_ack = PA, + last_pub = LP, + last_ack = LA }, + {Confirms2, Activity2}) -> + case is_member_alias(Id, Self, View) of + true -> + {ToAck, PA1} = + find_common(queue_from_pubs(Pubs), PA, + queue:new()), + LA1 = last_ack(Acks, LA), + AckNums = acks_from_queue(ToAck), + Confirms3 = maybe_confirm( + Self, Id, Confirms2, AckNums), + {Member #member { pending_ack = PA1, + last_ack = LA1 }, + {Confirms3, + activity_cons( + Id, [], AckNums, Activity2)}}; + false -> + PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), + LA1 = last_ack(Acks, LA), + LP1 = last_pub(Pubs, LP), + {Member #member { pending_ack = PA1, + last_pub = LP1, + last_ack = LA1 }, + {Confirms2, + activity_cons(Id, Pubs, Acks, Activity2)}} + end + end, Id, MembersStateConfirmsActivity) + end, {MembersState, {Confirms, activity_nil()}}, Activity), + State1 = State #state { members_state = MembersState1, + confirms = Confirms1 }, + Activity3 = activity_finalise(Activity1), + {Result, State2} = maybe_erase_aliases(State1), + ok = maybe_send_activity(Activity3, State2), + if_callback_success( + Result, + fun (_Result1, State3) -> {callback(Args, Module, Activity3), State3} end, + fun (Result1, State3) -> {Result1, State3} end, + State2); + +handle_msg({activity, _NotLeft, _Activity}, State) -> + {ok, State}. + + +noreply(State) -> + ok = a(State), + {noreply, State, hibernate}. 
+ +reply(Reply, State) -> + ok = a(State), + {reply, Reply, State, hibernate}. + +a(#state { view = undefined }) -> + ok; +a(#state { self = Self, + left = {Left, _MRefL}, + view = View }) -> + #view_member { left = Left } = fetch_view_member(Self, View), + ok. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + members_state = MembersState, + module = Module, + confirms = Confirms, + callback_args = Args }) -> + PubMsg = {PubCount, Msg}, + Activity = activity_cons(Self, [PubMsg], [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = + with_member( + fun (Member = #member { pending_ack = PA }) -> + Member #member { pending_ack = queue:in(PubMsg, PA) } + end, Self, MembersState), + Confirms1 = case From of + none -> Confirms; + _ -> queue:in({PubCount, From}, Confirms) + end, + handle_callback_result({Module:handle_msg(Args, Self, Msg), + State #state { pub_count = PubCount + 1, + members_state = MembersState1, + confirms = Confirms1 }}). + + +%% --------------------------------------------------------------------------- +%% View construction and inspection +%% --------------------------------------------------------------------------- + +needs_view_update(ReqVer, {Ver, _View}) -> + Ver < ReqVer. + +view_version({Ver, _View}) -> + Ver. + +is_member_alive({dead, _Member}) -> false; +is_member_alive(_) -> true. + +is_member_alias(Self, Self, _View) -> + true; +is_member_alias(Member, Self, View) -> + ?SETS:is_element(Member, + ((fetch_view_member(Self, View)) #view_member.aliases)). + +dead_member_id({dead, Member}) -> Member. + +store_view_member(VMember = #view_member { id = Id }, {Ver, View}) -> + {Ver, dict:store(Id, VMember, View)}. + +with_view_member(Fun, View, Id) -> + store_view_member(Fun(fetch_view_member(Id, View)), View). + +fetch_view_member(Id, {_Ver, View}) -> + dict:fetch(Id, View). + +find_view_member(Id, {_Ver, View}) -> + dict:find(Id, View). + +blank_view(Ver) -> + {Ver, dict:new()}. + +alive_view_members({_Ver, View}) -> + dict:fetch_keys(View). + +all_known_members({_Ver, View}) -> + dict:fold( + fun (Member, #view_member { aliases = Aliases }, Acc) -> + ?SETS:to_list(Aliases) ++ [Member | Acc] + end, [], View). + +group_to_view(#gm_group { members = Members, version = Ver }) -> + Alive = lists:filter(fun is_member_alive/1, Members), + [_|_] = Alive, %% ASSERTION - can't have all dead members + add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members). + +link_view([Left, Middle, Right | Rest], View) -> + case find_view_member(Middle, View) of + error -> + link_view( + [Middle, Right | Rest], + store_view_member(#view_member { id = Middle, + aliases = ?SETS:new(), + left = Left, + right = Right }, View)); + {ok, _} -> + View + end; +link_view(_, View) -> + View. + +add_aliases(View, Members) -> + Members1 = ensure_alive_suffix(Members), + {EmptyDeadSet, View1} = + lists:foldl( + fun (Member, {DeadAcc, ViewAcc}) -> + case is_member_alive(Member) of + true -> + {?SETS:new(), + with_view_member( + fun (VMember = + #view_member { aliases = Aliases }) -> + VMember #view_member { + aliases = ?SETS:union(Aliases, DeadAcc) } + end, ViewAcc, Member)}; + false -> + {?SETS:add_element(dead_member_id(Member), DeadAcc), + ViewAcc} + end + end, {?SETS:new(), View}, Members1), + 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION + View1. + +ensure_alive_suffix(Members) -> + queue:to_list(ensure_alive_suffix1(queue:from_list(Members))). 
+ +ensure_alive_suffix1(MembersQ) -> + {{value, Member}, MembersQ1} = queue:out_r(MembersQ), + case is_member_alive(Member) of + true -> MembersQ; + false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1)) + end. + + +%% --------------------------------------------------------------------------- +%% View modification +%% --------------------------------------------------------------------------- + +join_group(Self, GroupName) -> + join_group(Self, GroupName, read_group(GroupName)). + +join_group(Self, GroupName, {error, not_found}) -> + join_group(Self, GroupName, prune_or_create_group(Self, GroupName)); +join_group(Self, _GroupName, #gm_group { members = [Self] } = Group) -> + group_to_view(Group); +join_group(Self, GroupName, #gm_group { members = Members } = Group) -> + case lists:member(Self, Members) of + true -> + group_to_view(Group); + false -> + case lists:filter(fun is_member_alive/1, Members) of + [] -> + join_group(Self, GroupName, + prune_or_create_group(Self, GroupName)); + Alive -> + Left = lists:nth(random:uniform(length(Alive)), Alive), + try + case gen_server2:call( + Left, {add_on_right, Self}, infinity) of + {ok, Group1} -> group_to_view(Group1); + not_ready -> join_group(Self, GroupName) + end + catch + exit:{R, _} + when R =:= noproc; R =:= normal; R =:= shutdown -> + join_group( + Self, GroupName, + record_dead_member_in_group(Left, GroupName)) + end + end + end. + +read_group(GroupName) -> + case mnesia:dirty_read(?GROUP_TABLE, GroupName) of + [] -> {error, not_found}; + [Group] -> Group + end. + +prune_or_create_group(Self, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> GroupNew = #gm_group { name = GroupName, + members = [Self], + version = 0 }, + case mnesia:read(?GROUP_TABLE, GroupName) of + [] -> + mnesia:write(GroupNew), + GroupNew; + [Group1 = #gm_group { members = Members }] -> + case lists:any(fun is_member_alive/1, Members) of + true -> Group1; + false -> mnesia:write(GroupNew), + GroupNew + end + end + end), + Group. + +record_dead_member_in_group(Member, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case lists:splitwith( + fun (Member1) -> Member1 =/= Member end, Members) of + {_Members1, []} -> %% not found - already recorded dead + Group1; + {Members1, [Member | Members2]} -> + Members3 = Members1 ++ [{dead, Member} | Members2], + Group2 = Group1 #gm_group { members = Members3, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +record_new_member_in_group(GroupName, Left, NewMember, Fun) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [#gm_group { members = Members, version = Ver } = Group1] = + mnesia:read(?GROUP_TABLE, GroupName), + {Prefix, [Left | Suffix]} = + lists:splitwith(fun (M) -> M =/= Left end, Members), + Members1 = Prefix ++ [Left, NewMember | Suffix], + Group2 = Group1 #gm_group { members = Members1, + version = Ver + 1 }, + ok = Fun(Group2), + mnesia:write(Group2), + Group2 + end), + Group. 
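+
+%% To illustrate the shape of the stored group (an assumed example, not
+%% taken from a real run): if B dies in the ring A -> B -> C, the record
+%% becomes #gm_group { members = [A, {dead, B}, C] } with the version
+%% bumped; group_to_view/1 then makes B an alias of C, B's nearest alive
+%% downstream member, until erase_members_in_group/2 below removes the
+%% entry once no messages for B remain in flight.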
+ +erase_members_in_group(Members, GroupName) -> + DeadMembers = [{dead, Id} || Id <- Members], + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [Group1 = #gm_group { members = [_|_] = Members1, + version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case Members1 -- DeadMembers of + Members1 -> Group1; + Members2 -> Group2 = + Group1 #gm_group { members = Members2, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +maybe_erase_aliases(State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + #view_member { aliases = Aliases } = fetch_view_member(Self, View), + {Erasable, MembersState1} + = ?SETS:fold( + fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> + #member { last_pub = LP, last_ack = LA } = + find_member_or_blank(Id, MembersState), + case can_erase_view_member(Self, Id, LA, LP) of + true -> {[Id | ErasableAcc], + erase_member(Id, MembersStateAcc)}; + false -> Acc + end + end, {[], MembersState}, Aliases), + State1 = State #state { members_state = MembersState1 }, + case Erasable of + [] -> {ok, State1}; + _ -> View1 = group_to_view( + erase_members_in_group(Erasable, GroupName)), + {callback_view_changed(Args, Module, View, View1), + State1 #state { view = View1 }} + end. + +can_erase_view_member(Self, Self, _LA, _LP) -> false; +can_erase_view_member(_Self, _Id, N, N) -> true; +can_erase_view_member(_Self, _Id, _LA, _LP) -> false. + + +%% --------------------------------------------------------------------------- +%% View monitoring and maintanence +%% --------------------------------------------------------------------------- + +ensure_neighbour(_Ver, Self, {Self, undefined}, Self) -> + {Self, undefined}; +ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) -> + ok = gen_server2:cast(RealNeighbour, {?TAG, Ver, check_neighbours}), + {RealNeighbour, maybe_monitor(RealNeighbour, Self)}; +ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) -> + {RealNeighbour, MRef}; +ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) -> + true = erlang:demonitor(MRef), + Msg = {?TAG, Ver, check_neighbours}, + ok = gen_server2:cast(RealNeighbour, Msg), + ok = case Neighbour of + Self -> ok; + _ -> gen_server2:cast(Neighbour, Msg) + end, + {Neighbour, maybe_monitor(Neighbour, Self)}. + +maybe_monitor(Self, Self) -> + undefined; +maybe_monitor(Other, _Self) -> + erlang:monitor(process, Other). + +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View }) -> + #view_member { left = VLeft, right = VRight } + = fetch_view_member(Self, View), + Ver = view_version(View), + Left1 = ensure_neighbour(Ver, Self, Left, VLeft), + Right1 = ensure_neighbour(Ver, Self, Right, VRight), + State1 = State #state { left = Left1, right = Right1 }, + ok = maybe_send_catchup(Right, State1), + State1. + +maybe_send_catchup(Right, #state { right = Right }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Self, undefined} }) -> + ok; +maybe_send_catchup(_Right, #state { members_state = undefined }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Right, _MRef}, + view = View, + members_state = MembersState }) -> + send_right(Right, View, + {catchup, Self, prepare_members_state(MembersState)}). 
+ + +%% --------------------------------------------------------------------------- +%% Catch_up delta detection +%% --------------------------------------------------------------------------- + +find_prefix_common_suffix(A, B) -> + {Prefix, A1} = find_prefix(A, B, queue:new()), + {Common, Suffix} = find_common(A1, B, queue:new()), + {Prefix, Common, Suffix}. + +%% Returns the elements of A that occur before the first element of B, +%% plus the remainder of A. +find_prefix(A, B, Prefix) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, _A1}, {{value, Val}, _B1}} -> + {Prefix, A}; + {{empty, A1}, {{value, _A}, _B1}} -> + {Prefix, A1}; + {{{value, {NumA, _MsgA} = Val}, A1}, + {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB -> + find_prefix(A1, B, queue:in(Val, Prefix)); + {_, {empty, _B1}} -> + {A, Prefix} %% Prefix well be empty here + end. + +%% A should be a prefix of B. Returns the commonality plus the +%% remainder of B. +find_common(A, B, Common) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, A1}, {{value, Val}, B1}} -> + find_common(A1, B1, queue:in(Val, Common)); + {{empty, _A}, _} -> + {Common, B} + end. + + +%% --------------------------------------------------------------------------- +%% Members helpers +%% --------------------------------------------------------------------------- + +with_member(Fun, Id, MembersState) -> + store_member( + Id, Fun(find_member_or_blank(Id, MembersState)), MembersState). + +with_member_acc(Fun, Id, {MembersState, Acc}) -> + {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc), + {store_member(Id, MemberState, MembersState), Acc1}. + +find_member_or_blank(Id, MembersState) -> + case dict:find(Id, MembersState) of + {ok, Result} -> Result; + error -> blank_member() + end. + +erase_member(Id, MembersState) -> + dict:erase(Id, MembersState). + +blank_member() -> + #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }. + +blank_member_state() -> + dict:new(). + +store_member(Id, MemberState, MembersState) -> + dict:store(Id, MemberState, MembersState). + +prepare_members_state(MembersState) -> + dict:to_list(MembersState). + +build_members_state(MembersStateList) -> + dict:from_list(MembersStateList). + + +%% --------------------------------------------------------------------------- +%% Activity assembly +%% --------------------------------------------------------------------------- + +activity_nil() -> + queue:new(). + +activity_cons(_Id, [], [], Tail) -> + Tail; +activity_cons(Sender, Pubs, Acks, Tail) -> + queue:in({Sender, Pubs, Acks}, Tail). + +activity_finalise(Activity) -> + queue:to_list(Activity). + +maybe_send_activity([], _State) -> + ok; +maybe_send_activity(Activity, #state { self = Self, + right = {Right, _MRefR}, + view = View }) -> + send_right(Right, View, {activity, Self, Activity}). + +send_right(Right, View, Msg) -> + ok = gen_server2:cast(Right, {?TAG, view_version(View), Msg}). + +callback(Args, Module, Activity) -> + lists:foldl( + fun ({Id, Pubs, _Acks}, ok) -> + lists:foldl(fun ({_PubNum, Pub}, ok) -> + Module:handle_msg(Args, Id, Pub); + (_, Error) -> + Error + end, ok, Pubs); + (_, Error) -> + Error + end, ok, Activity). + +callback_view_changed(Args, Module, OldView, NewView) -> + OldMembers = all_known_members(OldView), + NewMembers = all_known_members(NewView), + Births = NewMembers -- OldMembers, + Deaths = OldMembers -- NewMembers, + case {Births, Deaths} of + {[], []} -> ok; + _ -> Module:members_changed(Args, Births, Deaths) + end. 
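+
+%% For reference, a finalised activity list, as carried in
+%% {activity, Sender, Activity} messages and walked by callback/3 above,
+%% has the shape
+%%
+%%   [{MemberId, [{PubNum, Msg}, ...], [AckedPubNum, ...]}, ...]
+%%
+%% where PubNum is the sending member's pub_count at the time of
+%% publication.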
+ +handle_callback_result({Result, State}) -> + if_callback_success( + Result, + fun (_Result, State1) -> noreply(State1) end, + fun ({stop, Reason}, State1) -> {stop, Reason, State1} end, + State); +handle_callback_result({Result, Reply, State}) -> + if_callback_success( + Result, + fun (_Result, State1) -> reply(Reply, State1) end, + fun ({stop, Reason}, State1) -> {stop, Reason, Reply, State1} end, + State). + +if_callback_success(ok, True, _False, State) -> + True(ok, State); +if_callback_success({become, Module, Args} = Result, True, _False, State) -> + True(Result, State #state { module = Module, + callback_args = Args }); +if_callback_success({stop, _Reason} = Result, _True, False, State) -> + False(Result, State). + +maybe_confirm(_Self, _Id, Confirms, []) -> + Confirms; +maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) -> + case queue:out(Confirms) of + {empty, _Confirms} -> + Confirms; + {{value, {PubNum, From}}, Confirms1} -> + gen_server2:reply(From, ok), + maybe_confirm(Self, Self, Confirms1, PubNums); + {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum -> + maybe_confirm(Self, Self, Confirms, PubNums) + end; +maybe_confirm(_Self, _Id, Confirms, _PubNums) -> + Confirms. + +purge_confirms(Confirms) -> + [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], + queue:new(). + + +%% --------------------------------------------------------------------------- +%% Msg transformation +%% --------------------------------------------------------------------------- + +acks_from_queue(Q) -> + [PubNum || {PubNum, _Msg} <- queue:to_list(Q)]. + +pubs_from_queue(Q) -> + queue:to_list(Q). + +queue_from_pubs(Pubs) -> + queue:from_list(Pubs). + +apply_acks([], Pubs) -> + Pubs; +apply_acks([PubNum | Acks], Pubs) -> + {{value, {PubNum, _Msg}}, Pubs1} = queue:out(Pubs), + apply_acks(Acks, Pubs1). + +join_pubs(Q, []) -> + Q; +join_pubs(Q, Pubs) -> + queue:join(Q, queue_from_pubs(Pubs)). + +last_ack([], LA) -> + LA; +last_ack(List, LA) -> + LA1 = lists:last(List), + true = LA1 > LA, %% ASSERTION + LA1. + +last_pub([], LP) -> + LP; +last_pub(List, LP) -> + {PubNum, _Msg} = lists:last(List), + true = PubNum > LP, %% ASSERTION + PubNum. diff --git a/src/gm_test.erl b/src/gm_test.erl new file mode 100644 index 00000000..aebfbb69 --- /dev/null +++ b/src/gm_test.erl @@ -0,0 +1,120 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm_test). + +-export([test/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +get_state() -> + get(state). + +with_state(Fun) -> + put(state, Fun(get_state())). + +inc() -> + case 1 + get(count) of + 100000 -> Now = os:timestamp(), + Start = put(ts, Now), + Diff = timer:now_diff(Now, Start), + Rate = 100000 / (Diff / 1000000), + io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), + put(count, 0); + N -> put(count, N) + end. 
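
%% A small sketch (ours) of the arithmetic inc/0 above performs every
%% 100000 messages: put/2 returns the previous value, so Start is the
%% timestamp recorded at the last report, and timer:now_diff/2 yields
%% microseconds.
rate(Now, Start, NMsgs) ->
    NMsgs / (timer:now_diff(Now, Start) / 1000000).
%% e.g. 100000 messages over 2.5 seconds gives 40000.0 msgs/sec, which is
%% what the io:format call above prints.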
+ +joined([], Members) -> + io:format("Joined ~p (~p members)~n", [self(), length(Members)]), + put(state, dict:from_list([{Member, empty} || Member <- Members])), + put(count, 0), + put(ts, os:timestamp()), + ok. + +members_changed([], Births, Deaths) -> + with_state( + fun (State) -> + State1 = + lists:foldl( + fun (Born, StateN) -> + false = dict:is_key(Born, StateN), + dict:store(Born, empty, StateN) + end, State, Births), + lists:foldl( + fun (Died, StateN) -> + true = dict:is_key(Died, StateN), + dict:erase(Died, StateN) + end, State1, Deaths) + end), + ok. + +handle_msg([], From, {test_msg, Num}) -> + inc(), + with_state( + fun (State) -> + ok = case dict:find(From, State) of + {ok, empty} -> ok; + {ok, Num} -> ok; + {ok, Num1} when Num < Num1 -> + exit({{from, From}, + {duplicate_delivery_of, Num1}, + {expecting, Num}}); + {ok, Num1} -> + exit({{from, From}, + {missing_delivery_of, Num}, + {received_early, Num1}}) + end, + dict:store(From, Num + 1, State) + end), + ok. + +terminate([], Reason) -> + io:format("Left ~p (~p)~n", [self(), Reason]), + ok. + +spawn_member() -> + spawn_link( + fun () -> + random:seed(now()), + %% start up delay of no more than 10 seconds + timer:sleep(random:uniform(10000)), + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), + Start = random:uniform(10000), + send_loop(Pid, Start, Start + random:uniform(10000)), + gm:leave(Pid), + spawn_more() + end). + +spawn_more() -> + [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. + +send_loop(_Pid, Target, Target) -> + ok; +send_loop(Pid, Count, Target) when Target > Count -> + case random:uniform(3) of + 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); + _ -> gm:broadcast(Pid, {test_msg, Count}) + end, + timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms + send_loop(Pid, Count + 1, Target). + +test() -> + ok = gm:create_tables(), + spawn_member(), + spawn_member(). diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl new file mode 100644 index 00000000..fb650144 --- /dev/null +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -0,0 +1,125 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(rabbit_mirror_queue_coordinator). + +-export([start_link/2, add_slave/2, get_gm/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([joined/2, members_changed/3, handle_msg/3]). + +-behaviour(gen_server2). +-behaviour(gm). + +-include("rabbit.hrl"). +-include("gm_specs.hrl"). + +-record(state, { q, + gm + }). + +-define(ONE_SECOND, 1000). + +start_link(Queue, GM) -> + gen_server2:start_link(?MODULE, [Queue, GM], []). + +add_slave(CPid, SlaveNode) -> + gen_server2:cast(CPid, {add_slave, SlaveNode}). + +get_gm(CPid) -> + gen_server2:call(CPid, get_gm, infinity). 
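
%% A usage sketch of the API above, mirroring what the master backing
%% queue does later in this series (the node name here is made up):
%%
%%   {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined),
%%   GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
%%   ok = rabbit_mirror_queue_coordinator:add_slave(CPid, 'rabbit@mirror1'),
%%
%% add_slave/2 is a cast, so it returns immediately; the slave itself is
%% started asynchronously via rabbit_mirror_queue_slave_sup on the target
%% node.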
+ +%% --------------------------------------------------------------------------- +%% gen_server +%% --------------------------------------------------------------------------- + +init([#amqqueue { name = QueueName } = Q, GM]) -> + GM1 = case GM of + undefined -> + ok = gm:create_tables(), + {ok, GM2} = gm:start_link(QueueName, ?MODULE, [self()]), + receive {joined, GM2, _Members} -> + ok + end, + GM2; + _ -> + true = link(GM), + GM + end, + {ok, _TRef} = + timer:apply_interval(?ONE_SECOND, gm, broadcast, [GM1, heartbeat]), + {ok, #state { q = Q, gm = GM1 }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. + +handle_call(get_gm, _From, State = #state { gm = GM }) -> + reply(GM, State). + +handle_cast({add_slave, Node}, State = #state { q = Q }) -> + Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), + rabbit_log:info("Adding slave node for queue ~p: ~p~n", + [Q #amqqueue.name, Result]), + noreply(State); + +handle_cast({gm_deaths, Deaths}, + State = #state { q = #amqqueue { name = QueueName } }) -> + rabbit_log:info("Master ~p saw deaths ~p for queue ~p~n", + [self(), Deaths, QueueName]), + Node = node(), + Node = node(rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths)), + noreply(State). + +handle_info(Msg, State) -> + {stop, {unexpected_info, Msg}, State}. + +terminate(_Reason, #state{}) -> + %% gen_server case + ok; +terminate([_CPid], _Reason) -> + %% gm case + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%% --------------------------------------------------------------------------- +%% GM +%% --------------------------------------------------------------------------- + +joined([CPid], Members) -> + CPid ! {joined, self(), Members}, + ok. + +members_changed([_CPid], _Births, []) -> + ok; +members_changed([CPid], _Births, Deaths) -> + ok = gen_server2:cast(CPid, {gm_deaths, Deaths}). + +handle_msg([_CPid], _From, heartbeat) -> + ok; +handle_msg([_CPid], _From, _Msg) -> + ok. + +%% --------------------------------------------------------------------------- +%% Others +%% --------------------------------------------------------------------------- + +noreply(State) -> + {noreply, State, hibernate}. + +reply(Reply, State) -> + {reply, Reply, State, hibernate}. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl new file mode 100644 index 00000000..2299c3d1 --- /dev/null +++ b/src/rabbit_mirror_queue_master.erl @@ -0,0 +1,221 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(rabbit_mirror_queue_master). + +-export([init/2, terminate/1, delete_and_terminate/1, + purge/1, publish/4, publish_delivered/5, fetch/2, ack/2, + tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, + requeue/3, len/1, is_empty/1, dropwhile/2, + set_ram_duration_target/2, ram_duration/1, + needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, + status/1]). 
+ +-export([start/1, stop/0]). + +-export([promote_backing_queue_state/4]). + +-behaviour(rabbit_backing_queue). + +-include("rabbit.hrl"). + +-record(state, { gm, + coordinator, + backing_queue, + backing_queue_state, + set_delivered + }). + +%% --------------------------------------------------------------------------- +%% Backing queue +%% --------------------------------------------------------------------------- + +start(_DurableQueues) -> + %% This will never get called as this module will never be + %% installed as the default BQ implementation. + exit({not_valid_for_generic_backing_queue, ?MODULE}). + +stop() -> + %% Same as start/1. + exit({not_valid_for_generic_backing_queue, ?MODULE}). + +init(#amqqueue { arguments = Args, durable = false } = Q, Recover) -> + {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined), + GM = rabbit_mirror_queue_coordinator:get_gm(CPid), + {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), + [rabbit_mirror_queue_coordinator:add_slave(CPid, binary_to_atom(Node, utf8)) + || {longstr, Node} <- Nodes], + {ok, BQ} = application:get_env(backing_queue_module), + BQS = BQ:init(Q, Recover), + #state { gm = GM, + coordinator = CPid, + backing_queue = BQ, + backing_queue_state = BQS, + set_delivered = 0 }. + +promote_backing_queue_state(CPid, BQ, BQS, GM) -> + #state { gm = GM, + coordinator = CPid, + backing_queue = BQ, + backing_queue_state = BQS, + set_delivered = BQ:len(BQS) }. + +terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> + %% Backing queue termination. The queue is going down but + %% shouldn't be deleted. Most likely safe shutdown of this + %% node. Thus just let some other slave take over. + State #state { backing_queue_state = BQ:terminate(BQS) }. + +delete_and_terminate(State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, delete_and_terminate), + State #state { backing_queue_state = BQ:delete_and_terminate(BQS), + set_delivered = 0 }. + +purge(State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {set_length, 0}), + {Count, BQS1} = BQ:purge(BQS), + {Count, State #state { backing_queue_state = BQS1, + set_delivered = 0 }}. + +publish(Msg = #basic_message { guid = Guid }, + MsgProps, ChPid, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {publish, false, Guid, MsgProps, ChPid}), + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + State #state { backing_queue_state = BQS1 }. + +publish_delivered(AckRequired, Msg = #basic_message { guid = Guid }, + MsgProps, ChPid, + State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {publish, {true, AckRequired}, Guid, MsgProps, ChPid}), + {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), + {AckTag, State #state { backing_queue_state = BQS1 }}. + +dropwhile(Fun, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + set_delivered = SetDelivered }) -> + Len = BQ:len(BQS), + BQS1 = BQ:dropwhile(Fun, BQS), + Dropped = Len - BQ:len(BQS1), + SetDelivered1 = lists:max([0, SetDelivered - Dropped]), + ok = gm:broadcast(GM, {set_length, BQ:len(BQS1)}), + State #state { backing_queue_state = BQS1, + set_delivered = SetDelivered1 }. 
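
%% A sketch of the queue arguments that init/2 above consumes. The exact
%% client-side declaration is not part of this patch; the shape below is
%% inferred from the table_lookup and the {longstr, Node} generator above,
%% i.e. an x-mirror argument holding a list of longstr node names:
%%
%%   Args = [{<<"x-mirror">>, array, [{longstr, <<"rabbit@mirror1">>},
%%                                    {longstr, <<"rabbit@mirror2">>}]}]
%%
%% Each listed node gets a slave via add_slave/2; entries of any other
%% type are silently skipped by the generator pattern.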
+ +fetch(AckRequired, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + set_delivered = SetDelivered }) -> + {Result, BQS1} = BQ:fetch(AckRequired, BQS), + State1 = State #state { backing_queue_state = BQS1 }, + case Result of + empty -> + {Result, State1}; + {#basic_message { guid = Guid } = Message, IsDelivered, AckTag, + Remaining} -> + ok = gm:broadcast(GM, {fetch, AckRequired, Guid, Remaining}), + IsDelivered1 = IsDelivered orelse SetDelivered > 0, + SetDelivered1 = lists:max([0, SetDelivered - 1]), + {{Message, IsDelivered1, AckTag, Remaining}, + State1 #state { set_delivered = SetDelivered1 }} + end. + +ack(AckTags, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + {Guids, BQS1} = BQ:ack(AckTags, BQS), + case Guids of + [] -> ok; + _ -> ok = gm:broadcast(GM, {ack, Guids}) + end, + {Guids, State #state { backing_queue_state = BQS1 }}. + +tx_publish(Txn, Msg, MsgProps, ChPid, #state {} = State) -> + %% gm:broadcast(GM, {tx_publish, Txn, Guid, MsgProps, ChPid}) + State. + +tx_ack(Txn, AckTags, #state {} = State) -> + %% gm:broadcast(GM, {tx_ack, Txn, Guids}) + State. + +tx_rollback(Txn, #state {} = State) -> + %% gm:broadcast(GM, {tx_rollback, Txn}) + {[], State}. + +tx_commit(Txn, PostCommitFun, MsgPropsFun, #state {} = State) -> + %% Maybe don't want to transmit the MsgPropsFun but what choice do + %% we have? OTOH, on the slaves, things won't be expiring on their + %% own (props are interpreted by amqqueue, not vq), so if the msg + %% props aren't quite the same, that doesn't matter. + %% + %% The PostCommitFun is actually worse - we need to prevent that + %% from being invoked until we have confirmation from all the + %% slaves that they've done everything up to there. + %% + %% In fact, transactions are going to need work seeing as it's at + %% this point that VQ mentions amqqueue, which will thus not work + %% on the slaves - we need to make sure that all the slaves do the + %% tx_commit_post_msg_store at the same point, and then when they + %% all confirm that (scatter/gather), we can finally invoke the + %% PostCommitFun. + %% + %% Another idea is that the slaves are actually driven with + %% pubacks and thus only the master needs to support txns + %% directly. + {[], State}. + +requeue(AckTags, MsgPropsFun, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + {Guids, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), + ok = gm:broadcast(GM, {requeue, MsgPropsFun, Guids}), + {Guids, State #state { backing_queue_state = BQS1 }}. + +len(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:len(BQS). + +is_empty(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:is_empty(BQS). + +set_ram_duration_target(Target, State = #state { backing_queue = BQ, + backing_queue_state = BQS}) -> + State #state { backing_queue_state = + BQ:set_ram_duration_target(Target, BQS) }. + +ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> + {Result, BQS1} = BQ:ram_duration(BQS), + {Result, State #state { backing_queue_state = BQS1 }}. + +needs_idle_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:needs_idle_timeout(BQS). + +idle_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:idle_timeout(BQS). + +handle_pre_hibernate(State = #state { backing_queue = BQ, + backing_queue_state = BQS}) -> + State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }. 
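
%% A small sketch (ours) of the set_delivered bookkeeping used by
%% promote_backing_queue_state/4 and fetch/2 above: a newly promoted
%% master treats the first N messages it hands out as already delivered,
%% since the failed master may have delivered them before dying.
mark_delivered(IsDelivered, SetDelivered) ->
    {IsDelivered orelse SetDelivered > 0, lists:max([0, SetDelivered - 1])}.
%% e.g. mark_delivered(false, 2) -> {true, 1}; once the counter reaches 0
%% the backing queue's own IsDelivered flag passes through unchanged.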
+ +status(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:status(BQS). diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl new file mode 100644 index 00000000..d37ebe1f --- /dev/null +++ b/src/rabbit_mirror_queue_misc.erl @@ -0,0 +1,42 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(rabbit_mirror_queue_misc). + +-export([remove_from_queue/2]). + +-include("rabbit.hrl"). + +remove_from_queue(QueueName, DeadPids) -> + DeadNodes = [node(DeadPid) || DeadPid <- DeadPids], + rabbit_misc:execute_mnesia_transaction( + fun () -> + [Q = #amqqueue { pid = QPid, + extra_pids = EPids }] = + mnesia:read({rabbit_queue, QueueName}), + [QPid1 | EPids1] = + [Pid || Pid <- [QPid | EPids], + not lists:member(node(Pid), DeadNodes)], + case {{QPid, EPids}, {QPid1, EPids1}} of + {Same, Same} -> + QPid; + _ -> + Q1 = Q #amqqueue { pid = QPid1, + extra_pids = EPids1 }, + mnesia:write(rabbit_queue, Q1, write), + QPid1 + end + end). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl new file mode 100644 index 00000000..452cbd5a --- /dev/null +++ b/src/rabbit_mirror_queue_slave.erl @@ -0,0 +1,481 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(rabbit_mirror_queue_slave). + +%% We join the GM group before we add ourselves to the amqqueue +%% record. As a result: +%% 1. We can receive msgs from GM that correspond to messages we will +%% never receive from publishers. +%% 2. When we receive a message from publishers, we must receive a +%% message from the GM group for it. +%% 3. However, that instruction from the GM group can arrive either +%% before or after the actual message. We need to be able to +%% distinguish between GM instructions arriving early, and case (1) +%% above. +%% +%% All instructions from the GM group must be processed in the order +%% in which they're received. +%% +%% Thus, we need a queue per sender, and a queue for GM instructions. +%% +%% On receipt of a GM group instruction, three things are possible: +%% 1. The queue of publisher messages is empty. Thus store the GM +%% instruction to the instrQ. +%% 2. The head of the queue of publisher messages has a message that +%% matches the GUID of the GM instruction. Remove the message, and +%% route appropriately. +%% 3. 
The head of the queue of publisher messages has a message that +%% does not match the GUID of the GM instruction. Throw away the GM +%% instruction: the GM instruction must correspond to a message +%% that we'll never receive. If it did not, then before the current +%% instruction, we would have received an instruction for the +%% message at the head of this queue, thus the head of the queue +%% would have been removed and processed. +%% +%% On receipt of a publisher message, three things are possible: +%% 1. The queue of GM group instructions is empty. Add the message to +%% the relevant queue and await instructions from the GM. +%% 2. The head of the queue of GM group instructions has an +%% instruction matching the GUID of the message. Remove that +%% instruction and act on it. Attempt to process the rest of the +%% instrQ. +%% 3. The head of the queue of GM group instructions has an +%% instruction that does not match the GUID of the message. If the +%% message is from the same publisher as is referred to by the +%% instruction then throw away the GM group instruction and repeat +%% - attempt to match against the next instruction if there is one: +%% The instruction thrown away was for a message we'll never +%% receive. +%% +%% In all cases, we are relying heavily on order preserving messaging +%% both from the GM group and from the publishers. + +-export([start_link/1, set_maximum_since_use/2]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3, handle_pre_hibernate/1]). + +-export([joined/2, members_changed/3, handle_msg/3]). + +-behaviour(gen_server2). +-behaviour(gm). + +-include("rabbit.hrl"). +-include("gm_specs.hrl"). + +-record(state, { q, + gm, + master_node, + backing_queue, + backing_queue_state, + rate_timer_ref, + + sender_queues, %% :: Pid -> MsgQ + guid_ack, %% :: Guid -> AckTag + instructions %% :: InstrQ + }). + +-define(RAM_DURATION_UPDATE_INTERVAL, 5000). + +start_link(Q) -> + gen_server2:start_link(?MODULE, [Q], []). + +set_maximum_since_use(QPid, Age) -> + gen_server2:cast(QPid, {set_maximum_since_use, Age}). + +init([#amqqueue { name = QueueName } = Q]) -> + process_flag(trap_exit, true), %% amqqueue_process traps exits too. + ok = gm:create_tables(), + {ok, GM} = gm:start_link(QueueName, ?MODULE, [self()]), + receive {joined, GM} -> + ok + end, + Self = self(), + Node = node(), + case rabbit_misc:execute_mnesia_transaction( + fun () -> + [Q1 = #amqqueue { pid = QPid, extra_pids = EPids }] = + mnesia:read({rabbit_queue, QueueName}), + case [Pid || Pid <- [QPid | EPids], node(Pid) =:= Node] of + [] -> + EPids1 = EPids ++ [Self], + mnesia:write(rabbit_queue, + Q1 #amqqueue { extra_pids = EPids1 }, + write), + {ok, QPid}; + _ -> + {error, node_already_present} + end + end) of + {ok, MPid} -> + ok = file_handle_cache:register_callback( + rabbit_amqqueue, set_maximum_since_use, [self()]), + ok = rabbit_memory_monitor:register( + self(), {rabbit_amqqueue, set_ram_duration_target, + [self()]}), + {ok, BQ} = application:get_env(backing_queue_module), + BQS = BQ:init(Q, false), + {ok, #state { q = Q, + gm = GM, + master_node = node(MPid), + backing_queue = BQ, + backing_queue_state = BQS, + rate_timer_ref = undefined, + + sender_queues = dict:new(), + guid_ack = dict:new(), + instructions = queue:new() + }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, + ?DESIRED_HIBERNATE}}; + {error, Error} -> + {stop, Error} + end. 
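
%% A minimal sketch (ours) of the membership check inside the transaction
%% above: a node may host at most one copy of a given queue, whether
%% master or slave, so a second slave on the same node refuses to start.
node_already_present(QPid, ExtraPids, Node) ->
    lists:any(fun (Pid) -> node(Pid) =:= Node end, [QPid | ExtraPids]).
%% In that case the transaction returns {error, node_already_present} and
%% init/1 turns it into {stop, node_already_present}.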
+ +handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> + %% Synchronous, "immediate" delivery mode + gen_server2:reply(From, false), %% master may deliver it, not us + handle_process_result(enqueue_message(Delivery, State)); + +handle_call({deliver, Delivery = #delivery {}}, From, State) -> + %% Synchronous, "mandatory" delivery mode + gen_server2:reply(From, true), %% amqqueue throws away the result anyway + handle_process_result(enqueue_message(Delivery, State)); + +handle_call({gm_deaths, Deaths}, From, + State = #state { q = #amqqueue { name = QueueName }, + gm = GM, + master_node = MNode }) -> + rabbit_log:info("Slave ~p saw deaths ~p for queue ~p~n", + [self(), Deaths, QueueName]), + case {node(), node(rabbit_mirror_queue_misc:remove_from_queue( + QueueName, Deaths))} of + {_Node, MNode} -> + reply(ok, State); + {Node, Node} -> + promote_me(From, State); + {_Node, MNode1} -> + gen_server2:reply(From, ok), + ok = gm:broadcast(GM, heartbeat), + noreply(State #state { master_node = MNode1 }) + end. + + +handle_cast({gm, Instruction}, State = #state { instructions = InstrQ }) -> + State1 = State #state { instructions = queue:in(Instruction, InstrQ) }, + case queue:is_empty(InstrQ) of + true -> handle_process_result(process_instructions(State1)); + false -> noreply(State1) + end; + +handle_cast({deliver, Delivery = #delivery {}}, State) -> + %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. + handle_process_result(enqueue_message(Delivery, State)); + +handle_cast({set_maximum_since_use, Age}, State) -> + ok = file_handle_cache:set_maximum_since_use(Age), + noreply(State); + +handle_cast({set_ram_duration_target, Duration}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + BQS1 = BQ:set_ram_duration_target(Duration, BQS), + noreply(State #state { backing_queue_state = BQS1 }); + +handle_cast(update_ram_duration, + State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + noreply(State #state { rate_timer_ref = just_measured, + backing_queue_state = BQS2 }). + +handle_info(Msg, State) -> + {stop, {unexpected_info, Msg}, State}. + +%% If the Reason is shutdown, or {shutdown, _}, it is not the queue +%% being deleted: it's just the node going down. Even though we're a +%% slave, we have no idea whether or not we'll be the only copy coming +%% back up. Thus we must assume we will be, and preserve anything we +%% have on disk. +terminate(Reason, #state { q = Q, + gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + rate_timer_ref = RateTRef }) -> + ok = gm:leave(GM), + QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( + Q, BQ, BQS, RateTRef, [], []), + rabbit_amqqueue_process:terminate(Reason, QueueState); +terminate([_SPid], _Reason) -> + %% gm case + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +handle_pre_hibernate(State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + %% mainly copied from amqqueue_process + BQS1 = BQ:handle_pre_hibernate(BQS), + %% no activity for a while == 0 egress and ingress rates + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), infinity), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS2 })}. 
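
%% A compact sketch (ours) of the decision encoded by the
%% {gm_deaths, Deaths} clause above, where MNode is the master node this
%% slave last knew about and NewMasterPid is what
%% rabbit_mirror_queue_misc:remove_from_queue/2 returns:
classify_master(NewMasterPid, MNode) when node(NewMasterPid) =:= MNode ->
    unchanged;                               %% same master: carry on
classify_master(NewMasterPid, _MNode) when node(NewMasterPid) =:= node() ->
    promote_me;                              %% we are the new master
classify_master(NewMasterPid, _MNode) ->
    {new_master_node, node(NewMasterPid)}.   %% someone else was promoted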
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+joined([SPid], _Members) ->
+    SPid ! {joined, self()},
+    ok.
+
+members_changed([_SPid], _Births, []) ->
+    ok;
+members_changed([SPid], _Births, Deaths) ->
+    rabbit_misc:with_exit_handler(
+      fun () -> {stop, normal} end,
+      fun () ->
+              case gen_server2:call(SPid, {gm_deaths, Deaths}) of
+                  ok ->
+                      ok;
+                  {promote, CPid} ->
+                      {become, rabbit_mirror_queue_coordinator, [CPid]}
+              end
+      end).
+
+handle_msg([_SPid], _From, heartbeat) ->
+    ok;
+handle_msg([SPid], _From, Msg) ->
+    ok = gen_server2:cast(SPid, {gm, Msg}).
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+handle_process_result({continue, State}) -> noreply(State);
+handle_process_result({stop, State}) -> {stop, normal, State}.
+
+promote_me(From, #state { q = Q,
+                          gm = GM,
+                          backing_queue = BQ,
+                          backing_queue_state = BQS,
+                          rate_timer_ref = RateTRef,
+                          sender_queues = SQ,
+                          guid_ack = GA }) ->
+    rabbit_log:info("Promoting slave ~p for queue ~p~n",
+                    [self(), Q #amqqueue.name]),
+    {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM),
+    true = unlink(GM),
+    gen_server2:reply(From, {promote, CPid}),
+    ok = gm:confirmed_broadcast(GM, heartbeat),
+    MasterState = rabbit_mirror_queue_master:promote_backing_queue_state(
+                    CPid, BQ, BQS, GM),
+    %% We have to do the requeue via this init because otherwise we
+    %% don't have access to the relevant MsgPropsFun. Also, we are
+    %% already in mnesia as the master queue pid. Thus we cannot just
+    %% publish stuff by sending it to ourself - we must pass it
+    %% through to this init, otherwise we can violate ordering
+    %% constraints.
+    AckTags = [AckTag || {_Guid, AckTag} <- dict:to_list(GA)],
+    Deliveries = lists:append([queue:to_list(PubQ)
+                               || {_ChPid, PubQ} <- dict:to_list(SQ)]),
+    QueueState = rabbit_amqqueue_process:init_with_backing_queue_state(
+                   Q, rabbit_mirror_queue_master, MasterState, RateTRef,
+                   AckTags, Deliveries),
+    {become, rabbit_amqqueue_process, QueueState, hibernate}.
+
+noreply(State) ->
+    {noreply, next_state(State), hibernate}.
+
+reply(Reply, State) ->
+    {reply, Reply, next_state(State), hibernate}.
+
+next_state(State) ->
+    ensure_rate_timer(State).
+
+%% copied+pasted from amqqueue_process
+ensure_rate_timer(State = #state { rate_timer_ref = undefined }) ->
+    {ok, TRef} = timer:apply_after(
+                   ?RAM_DURATION_UPDATE_INTERVAL,
+                   rabbit_amqqueue, update_ram_duration,
+                   [self()]),
+    State #state { rate_timer_ref = TRef };
+ensure_rate_timer(State = #state { rate_timer_ref = just_measured }) ->
+    State #state { rate_timer_ref = undefined };
+ensure_rate_timer(State) ->
+    State.
+
+stop_rate_timer(State = #state { rate_timer_ref = undefined }) ->
+    State;
+stop_rate_timer(State = #state { rate_timer_ref = just_measured }) ->
+    State #state { rate_timer_ref = undefined };
+stop_rate_timer(State = #state { rate_timer_ref = TRef }) ->
+    {ok, cancel} = timer:cancel(TRef),
+    State #state { rate_timer_ref = undefined }.
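
%% The rate_timer_ref field cycles through three values; a sketch (ours)
%% of the transitions implemented by ensure_rate_timer/1 above and the
%% update_ram_duration cast, mirroring the amqqueue_process code it was
%% copied from:
%%
%%   undefined     --ensure_rate_timer--> TRef          (timer armed)
%%   TRef          --timer fires--------> just_measured (duration reported)
%%   just_measured --ensure_rate_timer--> undefined     (idle: left unarmed)
%%
%% so a slave that goes quiet stops reporting ram_duration until the next
%% delivery or instruction wakes it up.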
+ +enqueue_message(Delivery = #delivery { sender = ChPid }, + State = #state { sender_queues = SQ }) -> + Q = case dict:find(ChPid, SQ) of + {ok, Q1} -> Q1; + error -> queue:new() + end, + SQ1 = dict:store(ChPid, queue:in(Delivery, Q), SQ), + State1 = State #state { sender_queues = SQ1 }, + case queue:is_empty(Q) of + true -> process_instructions(State1); + false -> {continue, State1} + end. + +process_instructions(State = #state { instructions = InstrQ }) -> + case queue:out(InstrQ) of + {empty, _InstrQ} -> + {continue, State}; + {{value, Instr}, InstrQ1} -> + case process_instruction(Instr, State) of + {processed, State1} -> + process_instructions( + State1 #state { instructions = InstrQ1 }); + {stop, State1} -> + {stop, State1 #state { instructions = InstrQ1 }}; + blocked -> + {continue, State} + end + end. + +process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, + State = #state { sender_queues = SQ, + backing_queue = BQ, + backing_queue_state = BQS, + guid_ack = GA }) -> + case dict:find(ChPid, SQ) of + error -> + blocked; + {ok, Q} -> + case queue:out(Q) of + {empty, _Q} -> + blocked; + {{value, #delivery { + message = Msg = #basic_message { guid = Guid } }}, Q1} -> + State1 = State #state { sender_queues = + dict:store(ChPid, Q1, SQ) }, + {processed, + case Deliver of + false -> + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + State1 #state {backing_queue_state = BQS1 }; + {true, AckRequired} -> + {AckTag, BQS1} = BQ:publish_delivered( + AckRequired, Msg, MsgProps, + ChPid, BQS), + GA1 = case AckRequired of + true -> dict:store(Guid, AckTag, GA); + false -> GA + end, + State1 #state { backing_queue_state = BQS1, + guid_ack = GA1 } + end}; + {{value, #delivery {}}, _Q1} -> + %% throw away the instruction: we'll never receive + %% the message to which it corresponds. 
+ {processed, State} + end + end; +process_instruction({set_length, Length}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + QLen = BQ:len(BQS), + ToDrop = QLen - Length, + {processed, + case ToDrop > 0 of + true -> BQS1 = lists:foldl( + fun (const, BQSN) -> BQ:fetch(false, BQSN) end, + BQS, lists:duplicate(ToDrop, const)), + State #state { backing_queue_state = BQS1 }; + false -> State + end}; +process_instruction({fetch, AckRequired, Guid, Remaining}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + guid_ack = GA }) -> + QLen = BQ:len(BQS), + {processed, + case QLen - 1 of + Remaining -> + {{_Msg, _IsDelivered, AckTag, Remaining}, BQS1} = + BQ:fetch(AckRequired, BQS), + GA1 = case AckRequired of + true -> dict:store(Guid, AckTag, GA); + false -> GA + end, + State #state { backing_queue_state = BQS1, + guid_ack = GA1 }; + Other when Other < Remaining -> + %% we must be shorter than the master + State + end}; +process_instruction({ack, Guids}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + guid_ack = GA }) -> + {AckTags, GA1} = guids_to_acktags(Guids, GA), + {Guids1, BQS1} = BQ:ack(AckTags, BQS), + [] = Guids1 -- Guids, %% ASSERTION + {processed, State #state { guid_ack = GA1, + backing_queue_state = BQS1 }}; +process_instruction({requeue, MsgPropsFun, Guids}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + guid_ack = GA }) -> + {AckTags, GA1} = guids_to_acktags(Guids, GA), + {processed, + case length(AckTags) =:= length(Guids) of + true -> + {Guids, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), + State #state { guid_ack = GA1, + backing_queue_state = BQS1 }; + false -> + %% the only thing we can safely do is nuke out our BQ and + %% GA + {_Count, BQS1} = BQ:purge(BQS), + {Guids, BQS2} = ack_all(BQ, GA, BQS1), + State #state { guid_ack = dict:new(), + backing_queue_state = BQS2 } + end}; +process_instruction(delete_and_terminate, + State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + {stop, State #state { + backing_queue_state = BQ:delete_and_terminate(BQS) }}. + +guids_to_acktags(Guids, GA) -> + {AckTags, GA1} = + lists:foldl(fun (Guid, {AckTagsN, GAN}) -> + case dict:find(Guid, GA) of + error -> {AckTagsN, GAN}; + {ok, AckTag} -> {[AckTag | AckTagsN], + dict:erase(Guid, GAN)} + end + end, {[], GA}, Guids), + {lists:reverse(AckTags), GA1}. + +ack_all(BQ, GA, BQS) -> + BQ:ack([AckTag || {_Guid, AckTag} <- dict:to_list(GA)], BQS). diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl new file mode 100644 index 00000000..6658e6c3 --- /dev/null +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -0,0 +1,54 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(rabbit_mirror_queue_slave_sup). 
+ +-rabbit_boot_step({mirror_queue_slave_sup, + [{description, "mirror queue slave sup"}, + {mfa, {rabbit_mirror_queue_slave_sup, start, []}}, + {requires, core_initialized}, + {enables, queue_sup_queue_recovery}]}). + +-behaviour(supervisor2). + +-export([start/0, start_link/0, start_child/2]). + +-export([init/1]). + +-include_lib("rabbit.hrl"). + +-define(SERVER, ?MODULE). + +start() -> + {ok, _} = + supervisor:start_child( + rabbit_sup, + {rabbit_mirror_queue_slave_sup, + {rabbit_mirror_queue_slave_sup, start_link, []}, + transient, infinity, supervisor, [rabbit_mirror_queue_slave_sup]}), + ok. + +start_link() -> + supervisor2:start_link({local, ?SERVER}, ?MODULE, []). + +start_child(Node, Args) -> + supervisor2:start_child({?SERVER, Node}, Args). + +init([]) -> + {ok, {{simple_one_for_one_terminate, 10, 10}, + [{rabbit_mirror_queue_slave, + {rabbit_mirror_queue_slave, start_link, []}, + temporary, ?MAX_WAIT, worker, [rabbit_mirror_queue_slave]}]}}. -- cgit v1.2.1 From 7e7eebb1ee7c4911b2250a02373cd8ff6fca3351 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 16 Dec 2010 15:37:57 +0000 Subject: Make use of the wonderful new registry. --- src/gm.erl | 12 +++++++----- src/rabbit.erl | 13 ++++++------- src/rabbit_mnesia.erl | 20 ++++++++------------ src/rabbit_registry.erl | 3 ++- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 47971bd4..d41b7a1c 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -372,6 +372,7 @@ -behaviour(gen_server2). +-behaviour(rabbit_mnesia). -export([create_tables/0, start_link/3, leave/1, broadcast/2, confirmed_broadcast/2, group_members/1]). @@ -381,7 +382,7 @@ -export([behaviour_info/1]). --export([add_to_rabbit_mnesia/0]). +-export([table_definitions/0]). -define(GROUP_TABLE, gm_group). -define(HIBERNATE_AFTER_MIN, 1000). @@ -413,7 +414,9 @@ -rabbit_boot_step({gm_tables, [{description, "add GM tables to rabbit_mnesia"}, - {mfa, {?MODULE, add_to_rabbit_mnesia, []}}, + {mfa, {rabbit_registry, register, + [mnesia, <<"gm">>, ?MODULE]}}, + {requires, rabbit_registry}, {enables, database}]}). -define(TAG, '$gm'). @@ -480,10 +483,9 @@ create_tables([{Table, Attributes} | Tables]) -> Err -> Err end. -add_to_rabbit_mnesia() -> +table_definitions() -> {Name, Attributes} = ?TABLE, - ok = rabbit_mnesia:add_table_definition( - {Name, [?TABLE_MATCH | Attributes]}). + [{Name, [?TABLE_MATCH | Attributes]}]. start_link(GroupName, Module, Args) -> gen_server2:start_link(?MODULE, [GroupName, Module, Args], []). diff --git a/src/rabbit.erl b/src/rabbit.erl index 2ebfdecf..d46c62b6 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -51,6 +51,12 @@ []}}, {enables, external_infrastructure}]}). +-rabbit_boot_step({rabbit_registry, + [{description, "plugin registry"}, + {mfa, {rabbit_sup, start_child, + [rabbit_registry]}}, + {enables, external_infrastructure}]}). + -rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, {enables, external_infrastructure}]}). @@ -69,13 +75,6 @@ -rabbit_boot_step({external_infrastructure, [{description, "external infrastructure ready"}]}). --rabbit_boot_step({rabbit_registry, - [{description, "plugin registry"}, - {mfa, {rabbit_sup, start_child, - [rabbit_registry]}}, - {requires, external_infrastructure}, - {enables, kernel_ready}]}). 
- -rabbit_boot_step({rabbit_log, [{description, "logging server"}, {mfa, {rabbit_sup, start_restartable_child, diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c97988d0..1d2c3640 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -34,11 +34,12 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, - is_clustered/0, empty_ram_only_tables/0, copy_db/1, - add_table_definition/1]). + is_clustered/0, empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). +-export([behaviour_info/1]). + %% create_tables/0 exported for helping embed RabbitMQ in or alongside %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). @@ -72,6 +73,9 @@ %%---------------------------------------------------------------------------- +behaviour_info(callbacks) -> [{table_definitions, 0}]; +behaviour_info(_Other) -> undefined. + status() -> [{nodes, case mnesia:system_info(is_running) of yes -> [{Key, Nodes} || @@ -214,17 +218,9 @@ table_definitions() -> {match, #amqqueue{name = queue_name_match(), _='_'}}]}] ++ plugin_table_definitions(). -%% TODO: re-work this abuse of the application env as a register with -%% the generic registry that should be landing at some point. -add_table_definition(Def) -> - ok = application:set_env(rabbit, plugin_mnesia_tables, - [Def | plugin_table_definitions()], infinity). - plugin_table_definitions() -> - case application:get_env(rabbit, plugin_mnesia_tables) of - {ok, Defs} -> Defs; - undefined -> [] - end. + lists:append([Mod:table_definitions() + || {_Type, Mod} <- rabbit_registry:lookup_all(mnesia)]). binding_match() -> #binding{source = exchange_name_match(), diff --git a/src/rabbit_registry.erl b/src/rabbit_registry.erl index 7a3fcb51..935cf1d0 100644 --- a/src/rabbit_registry.erl +++ b/src/rabbit_registry.erl @@ -111,7 +111,8 @@ sanity_check_module(ClassModule, Module) -> end. class_module(exchange) -> rabbit_exchange_type; -class_module(auth_mechanism) -> rabbit_auth_mechanism. +class_module(auth_mechanism) -> rabbit_auth_mechanism; +class_module(mnesia) -> rabbit_mnesia. %%--------------------------------------------------------------------------- -- cgit v1.2.1 From a6781f983a5c23281a94e153901b016e3ac7fdb8 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 16 Dec 2010 18:13:20 +0000 Subject: extra_pids => mirror_pids; Also rip out changes to the registry and just hard code that rabbit_mnesia takes care of gm - the problem being that we need access to the table dfns even when rabbit app is stopped, thus the registry would be down --- include/rabbit.hrl | 2 +- src/gm.erl | 8 -------- src/rabbit_amqqueue.erl | 6 +++--- src/rabbit_mirror_queue_misc.erl | 14 +++++++------- src/rabbit_mirror_queue_slave.erl | 8 ++++---- src/rabbit_mnesia.erl | 11 +---------- src/rabbit_registry.erl | 3 +-- src/rabbit_router.erl | 4 ++-- src/rabbit_types.erl | 2 +- 9 files changed, 20 insertions(+), 38 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 9a74503c..421d5fba 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -54,7 +54,7 @@ -record(exchange, {name, type, durable, auto_delete, arguments}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, - arguments, pid, extra_pids}). + arguments, pid, mirror_pids}). %% mnesia doesn't like unary records, so we add a dummy 'value' field -record(route, {binding, value = const}). 
diff --git a/src/gm.erl b/src/gm.erl index d41b7a1c..0a6e346a 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -372,7 +372,6 @@ -behaviour(gen_server2). --behaviour(rabbit_mnesia). -export([create_tables/0, start_link/3, leave/1, broadcast/2, confirmed_broadcast/2, group_members/1]). @@ -412,13 +411,6 @@ {attributes, record_info(fields, gm_group)}]}). -define(TABLE_MATCH, {match, #gm_group { _ = '_' }}). --rabbit_boot_step({gm_tables, - [{description, "add GM tables to rabbit_mnesia"}, - {mfa, {rabbit_registry, register, - [mnesia, <<"gm">>, ?MODULE]}}, - {requires, rabbit_registry}, - {enables, database}]}). - -define(TAG, '$gm'). -ifdef(use_specs). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e4bc9f76..731bd234 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -207,7 +207,7 @@ declare(QueueName, Durable, AutoDelete, Args, Owner) -> arguments = Args, exclusive_owner = Owner, pid = none, - extra_pids = []}), + mirror_pids = []}), case gen_server2:call(Q#amqqueue.pid, {init, false}) of not_found -> rabbit_misc:not_found(QueueName); Q1 -> Q1 @@ -488,7 +488,7 @@ on_node_down(Node) -> rabbit_misc:execute_mnesia_transaction( fun () -> qlc:e(qlc:q([delete_queue(QueueName) || #amqqueue{name = QueueName, pid = Pid, - extra_pids = []} + mirror_pids = []} <- mnesia:table(rabbit_queue), node(Pid) == Node])) end))). @@ -503,7 +503,7 @@ pseudo_queue(QueueName, Pid) -> auto_delete = false, arguments = [], pid = Pid, - extra_pids = []}. + mirror_pids = []}. safe_delegate_call_ok(F, Pids) -> {_, Bad} = delegate:invoke(Pids, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index d37ebe1f..237bf080 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -24,18 +24,18 @@ remove_from_queue(QueueName, DeadPids) -> DeadNodes = [node(DeadPid) || DeadPid <- DeadPids], rabbit_misc:execute_mnesia_transaction( fun () -> - [Q = #amqqueue { pid = QPid, - extra_pids = EPids }] = + [Q = #amqqueue { pid = QPid, + mirror_pids = MPids }] = mnesia:read({rabbit_queue, QueueName}), - [QPid1 | EPids1] = - [Pid || Pid <- [QPid | EPids], + [QPid1 | MPids1] = + [Pid || Pid <- [QPid | MPids], not lists:member(node(Pid), DeadNodes)], - case {{QPid, EPids}, {QPid1, EPids1}} of + case {{QPid, MPids}, {QPid1, MPids1}} of {Same, Same} -> QPid; _ -> - Q1 = Q #amqqueue { pid = QPid1, - extra_pids = EPids1 }, + Q1 = Q #amqqueue { pid = QPid1, + mirror_pids = MPids1 }, mnesia:write(rabbit_queue, Q1, write), QPid1 end diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 452cbd5a..a9429ab8 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -108,13 +108,13 @@ init([#amqqueue { name = QueueName } = Q]) -> Node = node(), case rabbit_misc:execute_mnesia_transaction( fun () -> - [Q1 = #amqqueue { pid = QPid, extra_pids = EPids }] = + [Q1 = #amqqueue { pid = QPid, mirror_pids = MPids }] = mnesia:read({rabbit_queue, QueueName}), - case [Pid || Pid <- [QPid | EPids], node(Pid) =:= Node] of + case [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node] of [] -> - EPids1 = EPids ++ [Self], + MPids1 = MPids ++ [Self], mnesia:write(rabbit_queue, - Q1 #amqqueue { extra_pids = EPids1 }, + Q1 #amqqueue { mirror_pids = MPids1 }, write), {ok, QPid}; _ -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 1d2c3640..399bf1e0 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -38,8 +38,6 @@ -export([table_names/0]). --export([behaviour_info/1]). 
- %% create_tables/0 exported for helping embed RabbitMQ in or alongside %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). @@ -73,9 +71,6 @@ %%---------------------------------------------------------------------------- -behaviour_info(callbacks) -> [{table_definitions, 0}]; -behaviour_info(_Other) -> undefined. - status() -> [{nodes, case mnesia:system_info(is_running) of yes -> [{Key, Nodes} || @@ -216,11 +211,7 @@ table_definitions() -> [{record_name, amqqueue}, {attributes, record_info(fields, amqqueue)}, {match, #amqqueue{name = queue_name_match(), _='_'}}]}] - ++ plugin_table_definitions(). - -plugin_table_definitions() -> - lists:append([Mod:table_definitions() - || {_Type, Mod} <- rabbit_registry:lookup_all(mnesia)]). + ++ gm:table_definitions(). binding_match() -> #binding{source = exchange_name_match(), diff --git a/src/rabbit_registry.erl b/src/rabbit_registry.erl index 935cf1d0..7a3fcb51 100644 --- a/src/rabbit_registry.erl +++ b/src/rabbit_registry.erl @@ -111,8 +111,7 @@ sanity_check_module(ClassModule, Module) -> end. class_module(exchange) -> rabbit_exchange_type; -class_module(auth_mechanism) -> rabbit_auth_mechanism; -class_module(mnesia) -> rabbit_mnesia. +class_module(auth_mechanism) -> rabbit_auth_mechanism. %%--------------------------------------------------------------------------- diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index a4ad7fbc..66fc4070 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -117,8 +117,8 @@ check_delivery(_ , _ , {_ , Qs}) -> {routed, Qs}. lookup_qpids(QNames) -> lists:foldl(fun (QName, QPids) -> case mnesia:dirty_read({rabbit_queue, QName}) of - [#amqqueue{pid = QPid, extra_pids = EPids}] -> - EPids ++ [QPid | QPids]; + [#amqqueue{pid = QPid, mirror_pids = MPids}] -> + MPids ++ [QPid | QPids]; [] -> QPids end diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index bc1f9d7e..9eca964b 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -138,7 +138,7 @@ exclusive_owner :: rabbit_types:maybe(pid()), arguments :: rabbit_framing:amqp_table(), pid :: rabbit_types:maybe(pid()), - extra_pids :: [pid()]}). + mirror_pids :: [pid()]}). -type(exchange() :: #exchange{name :: rabbit_exchange:name(), -- cgit v1.2.1 From 34b1a1eb682015345c1b5bc0d3623ac1cba27bdd Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 16 Dec 2010 18:37:44 +0000 Subject: Permit dynamic inspection of the current mirror queues. This isn't particularly lovely because the master queue itself does not know, and thus has to do an mnesia read. However, for the time being it should do --- src/rabbit_amqqueue_process.erl | 6 +++++- src/rabbit_control.erl | 6 ++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index f8ec4ec8..4dd48457 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -89,7 +89,8 @@ messages, consumers, memory, - backing_queue_status + backing_queue_status, + mirror_pids ]). -define(CREATION_EVENT_KEYS, @@ -761,6 +762,9 @@ i(memory, _) -> M; i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) -> BQ:status(BQS); +i(mirror_pids, #q{q = #amqqueue{name = Name}}) -> + {ok, #amqqueue{mirror_pids = MPids}} = rabbit_amqqueue:lookup(Name), + MPids; i(Item, _) -> throw({bad_argument, Item}). 
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index df55d961..4a0bd25e 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -353,6 +353,12 @@ format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] = Value) when is_binary(TableEntryKey) andalso is_atom(TableEntryType) -> io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]); +format_info_item([T | _] = Value) + when is_tuple(T) orelse is_pid(T) orelse is_binary(T) orelse is_atom(T) orelse + is_list(T) -> + "[" ++ + lists:nthtail(2, lists:append( + [", " ++ format_info_item(E) || E <- Value])) ++ "]"; format_info_item(Value) -> io_lib:format("~w", [Value]). -- cgit v1.2.1 From 05374b4d1542334c85915a9ec27d79ba5d5c08f4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 00:31:15 +0000 Subject: Support maybe_run_queue_via_backing_queue in the slaves, and add some comments about where to deal with confirmations. I think. Assuming my understanding of pubacks is right. --- src/rabbit_mirror_queue_master.erl | 2 +- src/rabbit_mirror_queue_slave.erl | 25 ++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 2299c3d1..0d64ab8e 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -52,7 +52,7 @@ stop() -> %% Same as start/1. exit({not_valid_for_generic_backing_queue, ?MODULE}). -init(#amqqueue { arguments = Args, durable = false } = Q, Recover) -> +init(#amqqueue { arguments = Args } = Q, Recover) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index a9429ab8..ac49b10b 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -86,7 +86,9 @@ sender_queues, %% :: Pid -> MsgQ guid_ack, %% :: Guid -> AckTag - instructions %% :: InstrQ + instructions, %% :: InstrQ + + guid_to_channel %% for confirms }). -define(RAM_DURATION_UPDATE_INTERVAL, 5000). @@ -138,7 +140,9 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), guid_ack = dict:new(), - instructions = queue:new() + instructions = queue:new(), + + guid_to_channel = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}; @@ -172,8 +176,14 @@ handle_call({gm_deaths, Deaths}, From, gen_server2:reply(From, ok), ok = gm:broadcast(GM, heartbeat), noreply(State #state { master_node = MNode1 }) - end. + end; +handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> + reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). + + +handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> + noreply(maybe_run_queue_via_backing_queue(Fun, State)); handle_cast({gm, Instruction}, State = #state { instructions = InstrQ }) -> State1 = State #state { instructions = queue:in(Instruction, InstrQ) }, @@ -271,6 +281,12 @@ handle_msg([SPid], _From, Msg) -> %% Others %% --------------------------------------------------------------------------- +maybe_run_queue_via_backing_queue( + Fun, State = #state { backing_queue_state = BQS }) -> + %% TODO: some CONFIRM-like thing with these Guids + {_Guids, BQS1} = Fun(BQS), + State #state { backing_queue_state = BQS1 }. 
+ handle_process_result({continue, State}) -> noreply(State); handle_process_result({stop, State}) -> {stop, normal, State}. @@ -380,6 +396,7 @@ process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, {processed, case Deliver of false -> + %% RECORD CONFIRM - modify MsgProps BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), State1 #state {backing_queue_state = BQS1 }; {true, AckRequired} -> @@ -439,6 +456,7 @@ process_instruction({ack, Guids}, {AckTags, GA1} = guids_to_acktags(Guids, GA), {Guids1, BQS1} = BQ:ack(AckTags, BQS), [] = Guids1 -- Guids, %% ASSERTION + %% CONFIRM - persistent but delivered faster than disk sync {processed, State #state { guid_ack = GA1, backing_queue_state = BQS1 }}; process_instruction({requeue, MsgPropsFun, Guids}, @@ -457,6 +475,7 @@ process_instruction({requeue, MsgPropsFun, Guids}, %% GA {_Count, BQS1} = BQ:purge(BQS), {Guids, BQS2} = ack_all(BQ, GA, BQS1), + %% CONFIRM these Guids State #state { guid_ack = dict:new(), backing_queue_state = BQS2 } end}; -- cgit v1.2.1 From c0bf0c0b7d471fd20d1a6ec4cd09365a2f4f4749 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 00:31:55 +0000 Subject: Correct places to do confirmation stuff --- src/rabbit_mirror_queue_slave.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index ac49b10b..7fb13c5c 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -456,7 +456,6 @@ process_instruction({ack, Guids}, {AckTags, GA1} = guids_to_acktags(Guids, GA), {Guids1, BQS1} = BQ:ack(AckTags, BQS), [] = Guids1 -- Guids, %% ASSERTION - %% CONFIRM - persistent but delivered faster than disk sync {processed, State #state { guid_ack = GA1, backing_queue_state = BQS1 }}; process_instruction({requeue, MsgPropsFun, Guids}, @@ -475,7 +474,6 @@ process_instruction({requeue, MsgPropsFun, Guids}, %% GA {_Count, BQS1} = BQ:purge(BQS), {Guids, BQS2} = ack_all(BQ, GA, BQS1), - %% CONFIRM these Guids State #state { guid_ack = dict:new(), backing_queue_state = BQS2 } end}; -- cgit v1.2.1 From bf91d41b3684cda8c5c15bda13cf616d53116530 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 12:05:43 +0000 Subject: That might just be enough to support confirms --- src/rabbit_mirror_queue_slave.erl | 64 +++++++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 7fb13c5c..d4623bf5 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -282,10 +282,34 @@ handle_msg([SPid], _From, Msg) -> %% --------------------------------------------------------------------------- maybe_run_queue_via_backing_queue( - Fun, State = #state { backing_queue_state = BQS }) -> - %% TODO: some CONFIRM-like thing with these Guids - {_Guids, BQS1} = Fun(BQS), - State #state { backing_queue_state = BQS1 }. + Fun, State = #state { backing_queue_state = BQS, + guid_to_channel = GTC }) -> + {Guids, BQS1} = Fun(BQS), + GTC1 = lists:foldl(fun maybe_confirm_message/2, GTC, Guids), + State #state { backing_queue_state = BQS1, + guid_to_channel = GTC1 }. 
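
%% A minimal decision sketch (ours) of when a publish gets confirmed,
%% mirroring record_confirm_or_confirm/3 below: either no confirm was
%% requested, or it can be sent straight away, or it must wait until the
%% backing queue reports the guid (persistent message on a durable queue).
confirm_action(undefined, _IsPersistent, _IsDurableQueue) -> none_requested;
confirm_action(_MsgSeqNo, true,          true)            -> when_bq_confirms;
confirm_action(_MsgSeqNo, _IsPersistent, _IsDurableQueue) -> immediately.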
+ +record_confirm_or_confirm(#delivery { msg_seq_no = undefined }, _Q, GTC) -> + GTC; +record_confirm_or_confirm( + #delivery { sender = ChPid, + message = #basic_message { is_persistent = true, + guid = Guid }, + msg_seq_no = MsgSeqNo }, #amqqueue { durable = true }, GTC) -> + dict:store(Guid, {ChPid, MsgSeqNo}, GTC); +record_confirm_or_confirm(#delivery { sender = ChPid, msg_seq_no = MsgSeqNo }, + _Q, GTC) -> + ok = rabbit_channel:confirm(ChPid, MsgSeqNo), + GTC. + +maybe_confirm_message(Guid, GTC) -> + case dict:find(Guid, GTC) of + {ok, {ChPid, MsgSeqNo}} when MsgSeqNo =/= undefined -> + ok = rabbit_channel:confirm(ChPid, MsgSeqNo), + dict:erase(Guid, GTC); + error -> + GTC + end. handle_process_result({continue, State}) -> noreply(State); handle_process_result({stop, State}) -> {stop, normal, State}. @@ -361,7 +385,7 @@ enqueue_message(Delivery = #delivery { sender = ChPid }, false -> {continue, State1} end. -process_instructions(State = #state { instructions = InstrQ }) -> +process_instructions(State = #state { instructions = InstrQ }) -> case queue:out(InstrQ) of {empty, _InstrQ} -> {continue, State}; @@ -378,10 +402,12 @@ process_instructions(State = #state { instructions = InstrQ }) -> end. process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, - State = #state { sender_queues = SQ, + State = #state { q = Q, + sender_queues = SQ, backing_queue = BQ, backing_queue_state = BQS, - guid_ack = GA }) -> + guid_ack = GA, + guid_to_channel = GTC }) -> case dict:find(ChPid, SQ) of error -> blocked; @@ -389,26 +415,32 @@ process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, case queue:out(Q) of {empty, _Q} -> blocked; - {{value, #delivery { - message = Msg = #basic_message { guid = Guid } }}, Q1} -> + {{value, Delivery = #delivery { + message = Msg = #basic_message { guid = Guid } }}, + Q1} -> State1 = State #state { sender_queues = dict:store(ChPid, Q1, SQ) }, + GTC1 = record_confirm_or_confirm(Delivery, Q, GTC), {processed, case Deliver of false -> - %% RECORD CONFIRM - modify MsgProps BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State1 #state {backing_queue_state = BQS1 }; + State1 #state { backing_queue_state = BQS1, + guid_to_channel = GTC1 }; {true, AckRequired} -> {AckTag, BQS1} = BQ:publish_delivered( AckRequired, Msg, MsgProps, ChPid, BQS), - GA1 = case AckRequired of - true -> dict:store(Guid, AckTag, GA); - false -> GA - end, + {GA1, GTC2} = + case AckRequired of + true -> + {dict:store(Guid, AckTag, GA), GTC1}; + false -> + {GA, maybe_confirm_message(Guid, GTC1)} + end, State1 #state { backing_queue_state = BQS1, - guid_ack = GA1 } + guid_ack = GA1, + guid_to_channel = GTC2 } end}; {{value, #delivery {}}, _Q1} -> %% throw away the instruction: we'll never receive -- cgit v1.2.1 From a8ba00e17e58ce3aa3d20d510566f9d901a072fa Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 13:51:02 +0000 Subject: Accidental unification going on of two things named Q; fix a bug which led to repeated calls to BQ:delete_and_terminate (which turns out not to be idempotent) --- src/rabbit_mirror_queue_slave.erl | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index d4623bf5..166f473a 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -224,6 +224,10 @@ handle_info(Msg, State) -> %% slave, we have no idea whether or not we'll be the only copy coming %% back up. 
Thus we must assume we will be, and preserve anything we %% have on disk. +terminate(_Reason, #state { backing_queue_state = undefined }) -> + %% We've received a delete_and_terminate from gm, thus nothing to + %% do here. + ok; terminate(Reason, #state { q = Q, gm = GM, backing_queue = BQ, @@ -411,15 +415,15 @@ process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, case dict:find(ChPid, SQ) of error -> blocked; - {ok, Q} -> - case queue:out(Q) of - {empty, _Q} -> + {ok, MQ} -> + case queue:out(MQ) of + {empty, _MQ} -> blocked; {{value, Delivery = #delivery { message = Msg = #basic_message { guid = Guid } }}, - Q1} -> + MQ1} -> State1 = State #state { sender_queues = - dict:store(ChPid, Q1, SQ) }, + dict:store(ChPid, MQ1, SQ) }, GTC1 = record_confirm_or_confirm(Delivery, Q, GTC), {processed, case Deliver of @@ -442,7 +446,7 @@ process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, guid_ack = GA1, guid_to_channel = GTC2 } end}; - {{value, #delivery {}}, _Q1} -> + {{value, #delivery {}}, _MQ1} -> %% throw away the instruction: we'll never receive %% the message to which it corresponds. {processed, State} @@ -512,8 +516,8 @@ process_instruction({requeue, MsgPropsFun, Guids}, process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - {stop, State #state { - backing_queue_state = BQ:delete_and_terminate(BQS) }}. + BQ:delete_and_terminate(BQS), + {stop, State #state { backing_queue_state = undefined }}. guids_to_acktags(Guids, GA) -> {AckTags, GA1} = -- cgit v1.2.1 From 586a9cf3740489e8ef95fd0e51bf7aacda9ab8b9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 14:57:13 +0000 Subject: Fix a race condition that can occur on queue deletion. Also change boot order to make sure the msg_stores are started before us (and thus stopped after us) --- src/rabbit_mirror_queue_coordinator.erl | 9 ++++++--- src/rabbit_mirror_queue_misc.erl | 32 ++++++++++++++++++-------------- src/rabbit_mirror_queue_slave.erl | 14 ++++++++------ src/rabbit_mirror_queue_slave_sup.erl | 4 ++-- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index fb650144..6303952d 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -79,9 +79,12 @@ handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> rabbit_log:info("Master ~p saw deaths ~p for queue ~p~n", [self(), Deaths, QueueName]), - Node = node(), - Node = node(rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths)), - noreply(State). + case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of + {ok, Pid} when node(Pid) =:= node() -> + noreply(State); + {error, not_found} -> + {stop, normal, State} + end. handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. 
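%% Illustrative sketch, not part of the patch: the shape of the fix that
%% the rabbit_mirror_queue_misc hunk below applies to remove_from_queue/2,
%% and which the coordinator/slave callers above now rely on. The queue
%% record can be deleted concurrently, so the read happens inside the
%% transaction and an empty result becomes {error, not_found} for the
%% caller to handle, instead of letting the match crash. Tab, Key and
%% UpdateFun are stand-in names; rabbit_misc:execute_mnesia_transaction/1
%% is the same helper the patch itself uses.
update_if_present(Tab, Key, UpdateFun) ->
    rabbit_misc:execute_mnesia_transaction(
      fun () ->
              case mnesia:read({Tab, Key}) of
                  []    -> {error, not_found};
                  [Rec] -> {ok, UpdateFun(Rec)}
              end
      end).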
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 237bf080..05602076 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -24,19 +24,23 @@ remove_from_queue(QueueName, DeadPids) -> DeadNodes = [node(DeadPid) || DeadPid <- DeadPids], rabbit_misc:execute_mnesia_transaction( fun () -> - [Q = #amqqueue { pid = QPid, - mirror_pids = MPids }] = - mnesia:read({rabbit_queue, QueueName}), - [QPid1 | MPids1] = - [Pid || Pid <- [QPid | MPids], - not lists:member(node(Pid), DeadNodes)], - case {{QPid, MPids}, {QPid1, MPids1}} of - {Same, Same} -> - QPid; - _ -> - Q1 = Q #amqqueue { pid = QPid1, - mirror_pids = MPids1 }, - mnesia:write(rabbit_queue, Q1, write), - QPid1 + %% Someone else could have deleted the queue before we + %% get here. + case mnesia:read({rabbit_queue, QueueName}) of + [] -> {error, not_found}; + [Q = #amqqueue { pid = QPid, + mirror_pids = MPids }] -> + [QPid1 | MPids1] = + [Pid || Pid <- [QPid | MPids], + not lists:member(node(Pid), DeadNodes)], + case {{QPid, MPids}, {QPid1, MPids1}} of + {Same, Same} -> + {ok, QPid}; + _ -> + Q1 = Q #amqqueue { pid = QPid1, + mirror_pids = MPids1 }, + mnesia:write(rabbit_queue, Q1, write), + {ok, QPid1} + end end end). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 166f473a..f124bc9e 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -166,16 +166,18 @@ handle_call({gm_deaths, Deaths}, From, master_node = MNode }) -> rabbit_log:info("Slave ~p saw deaths ~p for queue ~p~n", [self(), Deaths, QueueName]), - case {node(), node(rabbit_mirror_queue_misc:remove_from_queue( - QueueName, Deaths))} of - {_Node, MNode} -> + case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of + {ok, Pid} when node(Pid) =:= MNode -> reply(ok, State); - {Node, Node} -> + {ok, Pid} when node(Pid) =:= node() -> promote_me(From, State); - {_Node, MNode1} -> + {ok, Pid} -> gen_server2:reply(From, ok), ok = gm:broadcast(GM, heartbeat), - noreply(State #state { master_node = MNode1 }) + noreply(State #state { master_node = node(Pid) }); + {error, not_found} -> + gen_server2:reply(From, ok), + {stop, normal, State} end; handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 6658e6c3..80c0520c 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -19,8 +19,8 @@ -rabbit_boot_step({mirror_queue_slave_sup, [{description, "mirror queue slave sup"}, {mfa, {rabbit_mirror_queue_slave_sup, start, []}}, - {requires, core_initialized}, - {enables, queue_sup_queue_recovery}]}). + {requires, queue_sup_queue_recovery}, + {enables, routing_ready}]}). -behaviour(supervisor2). -- cgit v1.2.1 From af2b47438dfe451b2fcd508dbf868e599478636e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 17 Dec 2010 16:42:34 +0000 Subject: Ensure that promoted slaves monitor the queue's exclusive owner --- src/rabbit_amqqueue_process.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 4dd48457..601f28e3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -132,8 +132,13 @@ init(Q) -> guid_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
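%% Illustrative sketch, not part of the patch: the lines that follow make
%% init_with_backing_queue_state/6 monitor the queue's exclusive owner. A
%% freshly promoted slave never went through init/1, so unless it sets up
%% that monitor itself it would not notice the owning connection dying,
%% and an exclusively-owned queue must go away with its owner. In
%% isolation the step looks like this (function name invented here):
maybe_monitor_owner(none)  -> ok;
maybe_monitor_owner(Owner) -> _MRef = erlang:monitor(process, Owner),
                              ok.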
-init_with_backing_queue_state(Q, BQ, BQS, RateTRef, AckTags, Deliveries) -> +init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, + RateTRef, AckTags, Deliveries) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), + case Owner of + none -> ok; + _ -> erlang:monitor(process, Owner) + end, State = requeue_and_run( AckTags, process_args( -- cgit v1.2.1 From 7b526a7941abafe79d8280d46577e215a8e1600b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 19 Dec 2010 00:54:51 +0000 Subject: Fixed a bug caused by revision 454fbb9127bd in rabbit-ha from where gm.erl came; Make gm_tests more robust; Avoid creating endless funs all the time for every message (substantial performance gain). Abstract use of dicts - expected use case is relatively small groups, thus orddict would normally be more appropriate --- src/gm.erl | 118 ++++++++++++++++++++++++++++---------------------------- src/gm_test.erl | 10 ++++- 2 files changed, 66 insertions(+), 62 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 0a6e346a..6a2c9c48 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -387,6 +387,7 @@ -define(HIBERNATE_AFTER_MIN, 1000). -define(DESIRED_HIBERNATE, 10000). -define(SETS, ordsets). +-define(DICT, orddict). -record(state, { self, @@ -574,10 +575,7 @@ handle_cast({?TAG, ReqVer, Msg}, end, handle_callback_result( if_callback_success( - Result, - fun (_Result1, State2) -> handle_msg(Msg, State2) end, - fun (Result1, State2) -> {Result1, State2} end, - State1)); + Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)); handle_cast({broadcast, _Msg}, State = #state { members_state = undefined }) -> noreply(State); @@ -617,6 +615,9 @@ handle_info({'DOWN', MRef, process, _Pid, _Reason}, left = Left, right = Right, group_name = GroupName, + view = View, + module = Module, + callback_args = Args, confirms = Confirms }) -> Member = case {Left, Right} of {{Member1, MRef}, _} -> Member1; @@ -638,7 +639,11 @@ handle_info({'DOWN', MRef, process, _Pid, _Reason}, members_state = blank_member_state(), confirms = purge_confirms(Confirms) }); _ -> - {ok, State1} + %% here we won't be pointing out any deaths: + %% the concern is that there maybe births + %% which we'd otherwise miss. + {callback_view_changed(Args, Module, View, View1), + State1} end, handle_callback_result({Result, check_neighbours(State2)}) end. 
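%% Illustrative sketch, not part of the patch: the pattern behind the
%% "avoid creating endless funs" change visible in the hunk above. The
%% continuations passed to if_callback_success used to be anonymous funs
%% built afresh for every message, each capturing Msg; they are replaced
%% by references to top-level functions such as handle_msg_true/3
%% (defined further down in this patch) plus an explicit extra argument,
%% so the continuation no longer has to capture Msg in a closure. A
%% self-contained toy version of the same transformation, with all names
%% invented:
-module(hoist_sketch).
-export([with_closure/2, with_named_fun/2]).

with_closure(Msg, State) ->
    %% a new fun capturing Msg is built on every call
    run(fun (State1) -> {Msg, State1} end, State).

with_named_fun(Msg, State) ->
    %% the continuation is a plain reference to a top-level function;
    %% Msg is threaded through as an ordinary argument instead
    run(fun continue/2, Msg, State).

run(Cont, State)      -> Cont(State).
run(Cont, Arg, State) -> Cont(Arg, State).

continue(Msg, State) -> {Msg, State}.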
@@ -674,8 +679,8 @@ handle_msg({catchup, Left, MembersStateLeft}, members_state = MembersState }) when MembersState =/= undefined -> MembersStateLeft1 = build_members_state(MembersStateLeft), - AllMembers = lists:usort(dict:fetch_keys(MembersState) ++ - dict:fetch_keys(MembersStateLeft1)), + AllMembers = lists:usort(?DICT:fetch_keys(MembersState) ++ + ?DICT:fetch_keys(MembersStateLeft1)), {MembersState1, Activity} = lists:foldl( fun (Id, MembersStateActivity) -> @@ -709,11 +714,9 @@ handle_msg({catchup, _NotLeft, _MembersState}, State) -> handle_msg({activity, Left, Activity}, State = #state { self = Self, left = {Left, _MRefL}, - module = Module, view = View, members_state = MembersState, - confirms = Confirms, - callback_args = Args }) + confirms = Confirms }) when MembersState =/= undefined -> {MembersState1, {Confirms1, Activity1}} = lists:foldl( @@ -755,31 +758,18 @@ handle_msg({activity, Left, Activity}, {Result, State2} = maybe_erase_aliases(State1), ok = maybe_send_activity(Activity3, State2), if_callback_success( - Result, - fun (_Result1, State3) -> {callback(Args, Module, Activity3), State3} end, - fun (Result1, State3) -> {Result1, State3} end, - State2); + Result, fun activity_true/3, fun activity_false/3, Activity3, State2); handle_msg({activity, _NotLeft, _Activity}, State) -> {ok, State}. noreply(State) -> - ok = a(State), {noreply, State, hibernate}. reply(Reply, State) -> - ok = a(State), {reply, Reply, State, hibernate}. -a(#state { view = undefined }) -> - ok; -a(#state { self = Self, - left = {Left, _MRefL}, - view = View }) -> - #view_member { left = Left } = fetch_view_member(Self, View), - ok. - internal_broadcast(Msg, From, State = #state { self = Self, pub_count = PubCount, members_state = MembersState, @@ -826,25 +816,25 @@ is_member_alias(Member, Self, View) -> dead_member_id({dead, Member}) -> Member. store_view_member(VMember = #view_member { id = Id }, {Ver, View}) -> - {Ver, dict:store(Id, VMember, View)}. + {Ver, ?DICT:store(Id, VMember, View)}. with_view_member(Fun, View, Id) -> store_view_member(Fun(fetch_view_member(Id, View)), View). fetch_view_member(Id, {_Ver, View}) -> - dict:fetch(Id, View). + ?DICT:fetch(Id, View). find_view_member(Id, {_Ver, View}) -> - dict:find(Id, View). + ?DICT:find(Id, View). blank_view(Ver) -> - {Ver, dict:new()}. + {Ver, ?DICT:new()}. alive_view_members({_Ver, View}) -> - dict:fetch_keys(View). + ?DICT:fetch_keys(View). all_known_members({_Ver, View}) -> - dict:fold( + ?DICT:fold( fun (Member, #view_member { aliases = Aliases }, Acc) -> ?SETS:to_list(Aliases) ++ [Member | Acc] end, [], View). @@ -1155,28 +1145,28 @@ with_member_acc(Fun, Id, {MembersState, Acc}) -> {store_member(Id, MemberState, MembersState), Acc1}. find_member_or_blank(Id, MembersState) -> - case dict:find(Id, MembersState) of + case ?DICT:find(Id, MembersState) of {ok, Result} -> Result; error -> blank_member() end. erase_member(Id, MembersState) -> - dict:erase(Id, MembersState). + ?DICT:erase(Id, MembersState). blank_member() -> #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }. blank_member_state() -> - dict:new(). + ?DICT:new(). store_member(Id, MemberState, MembersState) -> - dict:store(Id, MemberState, MembersState). + ?DICT:store(Id, MemberState, MembersState). prepare_members_state(MembersState) -> - dict:to_list(MembersState). + ?DICT:to_list(MembersState). build_members_state(MembersStateList) -> - dict:from_list(MembersStateList). + ?DICT:from_list(MembersStateList). 
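%% Illustrative sketch, not part of the patch: routing every members-state
%% operation through the ?DICT macro (like group membership through ?SETS)
%% turns the container choice into a one-line decision. gm groups are
%% expected to stay small, so an orddict, which is just a sorted key/value
%% list, is normally the better fit; switching back to dict for a
%% large-group experiment would only mean editing the define. Module and
%% function names below are invented for the sketch:
-module(dict_switch_sketch).
-define(DICT, orddict).
-export([new/0, add/3, lookup/2]).

new()            -> ?DICT:new().
add(Key, Val, D) -> ?DICT:store(Key, Val, D).
lookup(Key, D)   -> ?DICT:find(Key, D).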
%% --------------------------------------------------------------------------- @@ -1228,24 +1218,34 @@ callback_view_changed(Args, Module, OldView, NewView) -> handle_callback_result({Result, State}) -> if_callback_success( - Result, - fun (_Result, State1) -> noreply(State1) end, - fun ({stop, Reason}, State1) -> {stop, Reason, State1} end, - State); + Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State); handle_callback_result({Result, Reply, State}) -> if_callback_success( - Result, - fun (_Result, State1) -> reply(Reply, State1) end, - fun ({stop, Reason}, State1) -> {stop, Reason, Reply, State1} end, - State). - -if_callback_success(ok, True, _False, State) -> - True(ok, State); -if_callback_success({become, Module, Args} = Result, True, _False, State) -> - True(Result, State #state { module = Module, - callback_args = Args }); -if_callback_success({stop, _Reason} = Result, _True, False, State) -> - False(Result, State). + Result, fun reply_true/3, fun reply_false/3, Reply, State). + +no_reply_true (_Result, _Undefined, State) -> noreply(State). +no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}. + +reply_true (_Result, Reply, State) -> reply(Reply, State). +reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}. + +handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State). +handle_msg_false(Result, _Msg, State) -> {Result, State}. + +activity_true(_Result, Activity, State = #state { module = Module, + callback_args = Args }) -> + {callback(Args, Module, Activity), State}. +activity_false(Result, _Activity, State) -> + {Result, State}. + +if_callback_success(ok, True, _False, Arg, State) -> + True(ok, Arg, State); +if_callback_success( + {become, Module, Args} = Result, True, _False, Arg, State) -> + True(Result, Arg, State #state { module = Module, + callback_args = Args }); +if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) -> + False(Result, Arg, State). maybe_confirm(_Self, _Id, Confirms, []) -> Confirms; @@ -1282,14 +1282,12 @@ queue_from_pubs(Pubs) -> apply_acks([], Pubs) -> Pubs; -apply_acks([PubNum | Acks], Pubs) -> - {{value, {PubNum, _Msg}}, Pubs1} = queue:out(Pubs), - apply_acks(Acks, Pubs1). - -join_pubs(Q, []) -> - Q; -join_pubs(Q, Pubs) -> - queue:join(Q, queue_from_pubs(Pubs)). +apply_acks(List, Pubs) -> + {_, Pubs1} = queue:split(length(List), Pubs), + Pubs1. + +join_pubs(Q, []) -> Q; +join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)). last_ack([], LA) -> LA; diff --git a/src/gm_test.erl b/src/gm_test.erl index aebfbb69..e8f28598 100644 --- a/src/gm_test.erl +++ b/src/gm_test.erl @@ -59,7 +59,7 @@ members_changed([], Births, Deaths) -> lists:foldl( fun (Died, StateN) -> true = dict:is_key(Died, StateN), - dict:erase(Died, StateN) + dict:store(Died, died, StateN) end, State1, Deaths) end), ok. 
@@ -69,6 +69,9 @@ handle_msg([], From, {test_msg, Num}) -> with_state( fun (State) -> ok = case dict:find(From, State) of + {ok, died} -> + exit({{from, From}, + {received_posthumous_delivery, Num}}); {ok, empty} -> ok; {ok, Num} -> ok; {ok, Num1} when Num < Num1 -> @@ -78,7 +81,10 @@ handle_msg([], From, {test_msg, Num}) -> {ok, Num1} -> exit({{from, From}, {missing_delivery_of, Num}, - {received_early, Num1}}) + {received_early, Num1}}); + error -> + exit({{from, From}, + {received_premature_delivery, Num}}) end, dict:store(From, Num + 1, State) end), -- cgit v1.2.1 From dfd985400ac482349797f430978b773292eaea0f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 19 Dec 2010 01:10:14 +0000 Subject: When you send a msg to another node, if the local node doesn't already have a connection to the destination node, it has to contact epmd and try and resolve the remote node. This takes time. Thus, in the event of a distributed gm group, it's very important that we record asap when a member dies as that member might be our downstream, to which we're sending, and it might be on another node. Thus promote the DOWN messages. Because of the inherent races going on, gm is built to cope with this anyway. This has the nice benefit that promotion of slaves to master in the event of failure of master is now pretty much instantaneous --- src/gm.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/gm.erl b/src/gm.erl index 6a2c9c48..baf46471 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -377,7 +377,7 @@ confirmed_broadcast/2, group_members/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3]). + code_change/3, prioritise_info/2]). -export([behaviour_info/1]). @@ -658,6 +658,10 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. +prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; +prioritise_info(_ , _State) -> 0. + + handle_msg(check_neighbours, State) -> %% no-op - it's already been done by the calling handle_cast {ok, State}; -- cgit v1.2.1 From b73f2e5f8dd433fce76e1a8dee20596d6dbfd144 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 20 Dec 2010 00:26:33 +0000 Subject: Right, well the fake handling code in master is fine. The fake handling code in slave is utterly wrong. However, I need to sleep --- src/rabbit_mirror_queue_master.erl | 63 +++++++++++++------ src/rabbit_mirror_queue_slave.erl | 125 ++++++++++++++++++++++++++----------- 2 files changed, 135 insertions(+), 53 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 0d64ab8e..4628796f 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/4]). +-export([promote_backing_queue_state/5]). -behaviour(rabbit_backing_queue). @@ -36,7 +36,8 @@ coordinator, backing_queue, backing_queue_state, - set_delivered + set_delivered, + fakes }). %% --------------------------------------------------------------------------- @@ -64,14 +65,16 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = 0 }. + set_delivered = 0, + fakes = sets:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, Fakes) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = BQ:len(BQS) }. 
+ set_delivered = BQ:len(BQS), + fakes = Fakes }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but @@ -126,30 +129,54 @@ dropwhile(Fun, State = #state { gm = GM, fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = SetDelivered }) -> + set_delivered = SetDelivered, + fakes = Fakes }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), - State1 = State #state { backing_queue_state = BQS1 }, case Result of empty -> - {Result, State1}; + {Result, State #state { backing_queue_state = BQS1 }}; {#basic_message { guid = Guid } = Message, IsDelivered, AckTag, Remaining} -> - ok = gm:broadcast(GM, {fetch, AckRequired, Guid, Remaining}), - IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), - {{Message, IsDelivered1, AckTag, Remaining}, - State1 #state { set_delivered = SetDelivered1 }} + case sets:is_element(Guid, Fakes) of + true -> + {BQS2, Fakes1} = + case AckRequired of + true -> {[Guid], BQS3} = BQ:ack([AckTag], BQS1), + {BQS3, Fakes}; + false -> {BQS1, sets:del_element(Guid, Fakes)} + end, + ok = gm:broadcast(GM, {fetch, false, Guid, Remaining}), + fetch(AckRequired, + State #state { backing_queue_state = BQS2, + set_delivered = SetDelivered1, + fakes = Fakes1 }); + false -> + ok = gm:broadcast(GM, + {fetch, AckRequired, Guid, Remaining}), + IsDelivered1 = IsDelivered orelse SetDelivered > 0, + Fakes1 = case SetDelivered + SetDelivered1 of + 1 -> sets:new(); %% transition to 0 + _ -> Fakes + end, + {{Message, IsDelivered1, AckTag, Remaining}, + State #state { backing_queue_state = BQS1, + set_delivered = SetDelivered1, + fakes = Fakes1 }} + end end. ack(AckTags, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> + backing_queue_state = BQS, + fakes = Fakes }) -> {Guids, BQS1} = BQ:ack(AckTags, BQS), - case Guids of - [] -> ok; - _ -> ok = gm:broadcast(GM, {ack, Guids}) - end, - {Guids, State #state { backing_queue_state = BQS1 }}. + Fakes1 = case Guids of + [] -> Fakes; + _ -> ok = gm:broadcast(GM, {ack, Guids}), + sets:difference(Fakes, sets:from_list(Guids)) + end, + {Guids, State #state { backing_queue_state = BQS1, fakes = Fakes1 }}. tx_publish(Txn, Msg, MsgProps, ChPid, #state {} = State) -> %% gm:broadcast(GM, {tx_publish, Txn, Guid, MsgProps, ChPid}) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index f124bc9e..0134787c 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -75,6 +75,7 @@ -behaviour(gm). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -include("gm_specs.hrl"). -record(state, { q, @@ -87,6 +88,7 @@ sender_queues, %% :: Pid -> MsgQ guid_ack, %% :: Guid -> AckTag instructions, %% :: InstrQ + fakes, %% :: Set Guid guid_to_channel %% for confirms }). 
@@ -141,6 +143,7 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), guid_ack = dict:new(), instructions = queue:new(), + fakes = sets:new(), guid_to_channel = dict:new() }, hibernate, @@ -190,7 +193,7 @@ handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> handle_cast({gm, Instruction}, State = #state { instructions = InstrQ }) -> State1 = State #state { instructions = queue:in(Instruction, InstrQ) }, case queue:is_empty(InstrQ) of - true -> handle_process_result(process_instructions(State1)); + true -> handle_process_result(process_instructions(false, State1)); false -> noreply(State1) end; @@ -320,21 +323,25 @@ maybe_confirm_message(Guid, GTC) -> handle_process_result({continue, State}) -> noreply(State); handle_process_result({stop, State}) -> {stop, normal, State}. -promote_me(From, #state { q = Q, - gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - rate_timer_ref = RateTRef, - sender_queues = SQ, - guid_ack = GA }) -> +promote_me(From, State = #state { q = Q }) -> rabbit_log:info("Promoting slave ~p for queue ~p~n", [self(), Q #amqqueue.name]), + #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + rate_timer_ref = RateTRef, + sender_queues = SQ, + guid_ack = GA, + instructions = Instr, + fakes = Fakes } = + process_instructions(true, State), + true = queue:is_empty(Instr), %% ASSERTION {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM), + CPid, BQ, BQS, GM, Fakes), %% We have to do the requeue via this init because otherwise we %% don't have access to the relevent MsgPropsFun. Also, we are %% already in mnesia as the master queue pid. Thus we cannot just @@ -387,19 +394,19 @@ enqueue_message(Delivery = #delivery { sender = ChPid }, SQ1 = dict:store(ChPid, queue:in(Delivery, Q), SQ), State1 = State #state { sender_queues = SQ1 }, case queue:is_empty(Q) of - true -> process_instructions(State1); + true -> process_instructions(false, State1); false -> {continue, State1} end. -process_instructions(State = #state { instructions = InstrQ }) -> +process_instructions(Flush, State = #state { instructions = InstrQ }) -> case queue:out(InstrQ) of {empty, _InstrQ} -> {continue, State}; {{value, Instr}, InstrQ1} -> - case process_instruction(Instr, State) of + case process_instruction(Flush, Instr, State) of {processed, State1} -> process_instructions( - State1 #state { instructions = InstrQ1 }); + Flush, State1 #state { instructions = InstrQ1 }); {stop, State1} -> {stop, State1 #state { instructions = InstrQ1 }}; blocked -> @@ -407,20 +414,38 @@ process_instructions(State = #state { instructions = InstrQ }) -> end end. 
-process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, +process_instruction(Flush, {publish, Deliver, Guid, MsgProps, ChPid} = Instr, State = #state { q = Q, sender_queues = SQ, backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA, - guid_to_channel = GTC }) -> + guid_to_channel = GTC, + fakes = Fakes }) -> case dict:find(ChPid, SQ) of error -> - blocked; + case Flush of + true -> MQ = queue:from_list([fake_delivery(Q, Guid, ChPid)]), + State1 = State #state { + sender_queues = dict:store(ChPid, MQ, SQ), + fakes = sets:add_element(Guid, Fakes) }, + process_instruction(Flush, Instr, State1); + false -> blocked + end; {ok, MQ} -> case queue:out(MQ) of {empty, _MQ} -> - blocked; + case Flush of + true -> + MQ1 = queue:in_r(fake_delivery(Q, Guid, ChPid), MQ), + SQ1 = dict:store(ChPid, MQ1, SQ), + State1 = State #state { + sender_queues = SQ1, + fakes = sets:add_element(Guid, Fakes) }, + process_instruction(Flush, Instr, State1); + false -> + blocked + end; {{value, Delivery = #delivery { message = Msg = #basic_message { guid = Guid } }}, MQ1} -> @@ -449,28 +474,41 @@ process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, guid_to_channel = GTC2 } end}; {{value, #delivery {}}, _MQ1} -> - %% throw away the instruction: we'll never receive - %% the message to which it corresponds. - {processed, State} + MQ1 = queue:in_r(fake_delivery(Q, Guid, ChPid), MQ), + State1 = State #state { + sender_queues = dict:store(ChPid, MQ1, SQ), + fakes = sets:add_element(Guid, Fakes) }, + process_instruction(Flush, Instr, State1) end end; -process_instruction({set_length, Length}, +process_instruction(_Flush, {set_length, Length}, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> + backing_queue_state = BQS, + fakes = Fakes }) -> QLen = BQ:len(BQS), ToDrop = QLen - Length, {processed, case ToDrop > 0 of - true -> BQS1 = lists:foldl( - fun (const, BQSN) -> BQ:fetch(false, BQSN) end, - BQS, lists:duplicate(ToDrop, const)), - State #state { backing_queue_state = BQS1 }; - false -> State + true -> + {Guids, BQS1} = + lists:foldl( + fun (const, {GuidsN, BQSN}) -> + {{#basic_message { guid = Guid }, _IsDelivered, + _AckTag, _Remaining}, BQSN1} = + BQ:fetch(false, BQSN), + {[Guid | GuidsN], BQSN1} + end, BQS, lists:duplicate(ToDrop, const)), + Fakes1 = sets:difference(Fakes, sets:from_list(Guids)), + State #state { backing_queue_state = BQS1, + fakes = Fakes1 }; + false -> + State end}; -process_instruction({fetch, AckRequired, Guid, Remaining}, +process_instruction(_Flush, {fetch, AckRequired, Guid, Remaining}, State = #state { backing_queue = BQ, backing_queue_state = BQS, - guid_ack = GA }) -> + guid_ack = GA, + fakes = Fakes }) -> QLen = BQ:len(BQS), {processed, case QLen - 1 of @@ -481,22 +519,28 @@ process_instruction({fetch, AckRequired, Guid, Remaining}, true -> dict:store(Guid, AckTag, GA); false -> GA end, + Fakes1 = sets:del_element(Guid, Fakes), State #state { backing_queue_state = BQS1, - guid_ack = GA1 }; + guid_ack = GA1, + fakes = Fakes1 }; Other when Other < Remaining -> %% we must be shorter than the master + false = sets:is_element(Guid, Fakes), %% ASSERTION State end}; -process_instruction({ack, Guids}, +process_instruction(_Flush, {ack, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, - guid_ack = GA }) -> + guid_ack = GA, + fakes = Fakes }) -> {AckTags, GA1} = guids_to_acktags(Guids, GA), {Guids1, BQS1} = BQ:ack(AckTags, BQS), [] = Guids1 -- Guids, %% ASSERTION + Fakes1 = sets:difference(Fakes, sets:from_list(Guids)), {processed, 
State #state { guid_ack = GA1, - backing_queue_state = BQS1 }}; -process_instruction({requeue, MsgPropsFun, Guids}, + backing_queue_state = BQS1, + fakes = Fakes1 }}; +process_instruction(_Flush, {requeue, MsgPropsFun, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA }) -> @@ -515,7 +559,7 @@ process_instruction({requeue, MsgPropsFun, Guids}, State #state { guid_ack = dict:new(), backing_queue_state = BQS2 } end}; -process_instruction(delete_and_terminate, +process_instruction(_Flush, delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:delete_and_terminate(BQS), @@ -534,3 +578,14 @@ guids_to_acktags(Guids, GA) -> ack_all(BQ, GA, BQS) -> BQ:ack([AckTag || {_Guid, AckTag} <- dict:to_list(GA)], BQS). + +fake_delivery(#amqqueue { name = QueueName }, Guid, ChPid) -> + ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>), + Msg = (rabbit_basic:message(ExchangeName, <<>>, #'P_basic'{}, <<>>)) + #basic_message { guid = Guid }, + #delivery { mandatory = false, + immediate = false, + txn = none, + sender = ChPid, + message = Msg, + msg_seq_no = undefined }. -- cgit v1.2.1 From ac7bba488a805bf0e8248c42f861ccd7cb3aba63 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 20 Dec 2010 13:56:21 +0000 Subject: Revert the previous changeset as I've decided to solve this differently --- src/rabbit_mirror_queue_master.erl | 63 ++++++------------- src/rabbit_mirror_queue_slave.erl | 125 +++++++++++-------------------------- 2 files changed, 53 insertions(+), 135 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 4628796f..0d64ab8e 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/5]). +-export([promote_backing_queue_state/4]). -behaviour(rabbit_backing_queue). @@ -36,8 +36,7 @@ coordinator, backing_queue, backing_queue_state, - set_delivered, - fakes + set_delivered }). %% --------------------------------------------------------------------------- @@ -65,16 +64,14 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = 0, - fakes = sets:new() }. + set_delivered = 0 }. -promote_backing_queue_state(CPid, BQ, BQS, GM, Fakes) -> +promote_backing_queue_state(CPid, BQ, BQS, GM) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = BQ:len(BQS), - fakes = Fakes }. + set_delivered = BQ:len(BQS) }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. 
The queue is going down but @@ -129,54 +126,30 @@ dropwhile(Fun, State = #state { gm = GM, fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = SetDelivered, - fakes = Fakes }) -> + set_delivered = SetDelivered }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), + State1 = State #state { backing_queue_state = BQS1 }, case Result of empty -> - {Result, State #state { backing_queue_state = BQS1 }}; + {Result, State1}; {#basic_message { guid = Guid } = Message, IsDelivered, AckTag, Remaining} -> + ok = gm:broadcast(GM, {fetch, AckRequired, Guid, Remaining}), + IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), - case sets:is_element(Guid, Fakes) of - true -> - {BQS2, Fakes1} = - case AckRequired of - true -> {[Guid], BQS3} = BQ:ack([AckTag], BQS1), - {BQS3, Fakes}; - false -> {BQS1, sets:del_element(Guid, Fakes)} - end, - ok = gm:broadcast(GM, {fetch, false, Guid, Remaining}), - fetch(AckRequired, - State #state { backing_queue_state = BQS2, - set_delivered = SetDelivered1, - fakes = Fakes1 }); - false -> - ok = gm:broadcast(GM, - {fetch, AckRequired, Guid, Remaining}), - IsDelivered1 = IsDelivered orelse SetDelivered > 0, - Fakes1 = case SetDelivered + SetDelivered1 of - 1 -> sets:new(); %% transition to 0 - _ -> Fakes - end, - {{Message, IsDelivered1, AckTag, Remaining}, - State #state { backing_queue_state = BQS1, - set_delivered = SetDelivered1, - fakes = Fakes1 }} - end + {{Message, IsDelivered1, AckTag, Remaining}, + State1 #state { set_delivered = SetDelivered1 }} end. ack(AckTags, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS, - fakes = Fakes }) -> + backing_queue_state = BQS }) -> {Guids, BQS1} = BQ:ack(AckTags, BQS), - Fakes1 = case Guids of - [] -> Fakes; - _ -> ok = gm:broadcast(GM, {ack, Guids}), - sets:difference(Fakes, sets:from_list(Guids)) - end, - {Guids, State #state { backing_queue_state = BQS1, fakes = Fakes1 }}. + case Guids of + [] -> ok; + _ -> ok = gm:broadcast(GM, {ack, Guids}) + end, + {Guids, State #state { backing_queue_state = BQS1 }}. tx_publish(Txn, Msg, MsgProps, ChPid, #state {} = State) -> %% gm:broadcast(GM, {tx_publish, Txn, Guid, MsgProps, ChPid}) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 0134787c..f124bc9e 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -75,7 +75,6 @@ -behaviour(gm). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -include("gm_specs.hrl"). -record(state, { q, @@ -88,7 +87,6 @@ sender_queues, %% :: Pid -> MsgQ guid_ack, %% :: Guid -> AckTag instructions, %% :: InstrQ - fakes, %% :: Set Guid guid_to_channel %% for confirms }). 
@@ -143,7 +141,6 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), guid_ack = dict:new(), instructions = queue:new(), - fakes = sets:new(), guid_to_channel = dict:new() }, hibernate, @@ -193,7 +190,7 @@ handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> handle_cast({gm, Instruction}, State = #state { instructions = InstrQ }) -> State1 = State #state { instructions = queue:in(Instruction, InstrQ) }, case queue:is_empty(InstrQ) of - true -> handle_process_result(process_instructions(false, State1)); + true -> handle_process_result(process_instructions(State1)); false -> noreply(State1) end; @@ -323,25 +320,21 @@ maybe_confirm_message(Guid, GTC) -> handle_process_result({continue, State}) -> noreply(State); handle_process_result({stop, State}) -> {stop, normal, State}. -promote_me(From, State = #state { q = Q }) -> +promote_me(From, #state { q = Q, + gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + rate_timer_ref = RateTRef, + sender_queues = SQ, + guid_ack = GA }) -> rabbit_log:info("Promoting slave ~p for queue ~p~n", [self(), Q #amqqueue.name]), - #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - rate_timer_ref = RateTRef, - sender_queues = SQ, - guid_ack = GA, - instructions = Instr, - fakes = Fakes } = - process_instructions(true, State), - true = queue:is_empty(Instr), %% ASSERTION {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, Fakes), + CPid, BQ, BQS, GM), %% We have to do the requeue via this init because otherwise we %% don't have access to the relevent MsgPropsFun. Also, we are %% already in mnesia as the master queue pid. Thus we cannot just @@ -394,19 +387,19 @@ enqueue_message(Delivery = #delivery { sender = ChPid }, SQ1 = dict:store(ChPid, queue:in(Delivery, Q), SQ), State1 = State #state { sender_queues = SQ1 }, case queue:is_empty(Q) of - true -> process_instructions(false, State1); + true -> process_instructions(State1); false -> {continue, State1} end. -process_instructions(Flush, State = #state { instructions = InstrQ }) -> +process_instructions(State = #state { instructions = InstrQ }) -> case queue:out(InstrQ) of {empty, _InstrQ} -> {continue, State}; {{value, Instr}, InstrQ1} -> - case process_instruction(Flush, Instr, State) of + case process_instruction(Instr, State) of {processed, State1} -> process_instructions( - Flush, State1 #state { instructions = InstrQ1 }); + State1 #state { instructions = InstrQ1 }); {stop, State1} -> {stop, State1 #state { instructions = InstrQ1 }}; blocked -> @@ -414,38 +407,20 @@ process_instructions(Flush, State = #state { instructions = InstrQ }) -> end end. 
-process_instruction(Flush, {publish, Deliver, Guid, MsgProps, ChPid} = Instr, +process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, State = #state { q = Q, sender_queues = SQ, backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA, - guid_to_channel = GTC, - fakes = Fakes }) -> + guid_to_channel = GTC }) -> case dict:find(ChPid, SQ) of error -> - case Flush of - true -> MQ = queue:from_list([fake_delivery(Q, Guid, ChPid)]), - State1 = State #state { - sender_queues = dict:store(ChPid, MQ, SQ), - fakes = sets:add_element(Guid, Fakes) }, - process_instruction(Flush, Instr, State1); - false -> blocked - end; + blocked; {ok, MQ} -> case queue:out(MQ) of {empty, _MQ} -> - case Flush of - true -> - MQ1 = queue:in_r(fake_delivery(Q, Guid, ChPid), MQ), - SQ1 = dict:store(ChPid, MQ1, SQ), - State1 = State #state { - sender_queues = SQ1, - fakes = sets:add_element(Guid, Fakes) }, - process_instruction(Flush, Instr, State1); - false -> - blocked - end; + blocked; {{value, Delivery = #delivery { message = Msg = #basic_message { guid = Guid } }}, MQ1} -> @@ -474,41 +449,28 @@ process_instruction(Flush, {publish, Deliver, Guid, MsgProps, ChPid} = Instr, guid_to_channel = GTC2 } end}; {{value, #delivery {}}, _MQ1} -> - MQ1 = queue:in_r(fake_delivery(Q, Guid, ChPid), MQ), - State1 = State #state { - sender_queues = dict:store(ChPid, MQ1, SQ), - fakes = sets:add_element(Guid, Fakes) }, - process_instruction(Flush, Instr, State1) + %% throw away the instruction: we'll never receive + %% the message to which it corresponds. + {processed, State} end end; -process_instruction(_Flush, {set_length, Length}, +process_instruction({set_length, Length}, State = #state { backing_queue = BQ, - backing_queue_state = BQS, - fakes = Fakes }) -> + backing_queue_state = BQS }) -> QLen = BQ:len(BQS), ToDrop = QLen - Length, {processed, case ToDrop > 0 of - true -> - {Guids, BQS1} = - lists:foldl( - fun (const, {GuidsN, BQSN}) -> - {{#basic_message { guid = Guid }, _IsDelivered, - _AckTag, _Remaining}, BQSN1} = - BQ:fetch(false, BQSN), - {[Guid | GuidsN], BQSN1} - end, BQS, lists:duplicate(ToDrop, const)), - Fakes1 = sets:difference(Fakes, sets:from_list(Guids)), - State #state { backing_queue_state = BQS1, - fakes = Fakes1 }; - false -> - State + true -> BQS1 = lists:foldl( + fun (const, BQSN) -> BQ:fetch(false, BQSN) end, + BQS, lists:duplicate(ToDrop, const)), + State #state { backing_queue_state = BQS1 }; + false -> State end}; -process_instruction(_Flush, {fetch, AckRequired, Guid, Remaining}, +process_instruction({fetch, AckRequired, Guid, Remaining}, State = #state { backing_queue = BQ, backing_queue_state = BQS, - guid_ack = GA, - fakes = Fakes }) -> + guid_ack = GA }) -> QLen = BQ:len(BQS), {processed, case QLen - 1 of @@ -519,28 +481,22 @@ process_instruction(_Flush, {fetch, AckRequired, Guid, Remaining}, true -> dict:store(Guid, AckTag, GA); false -> GA end, - Fakes1 = sets:del_element(Guid, Fakes), State #state { backing_queue_state = BQS1, - guid_ack = GA1, - fakes = Fakes1 }; + guid_ack = GA1 }; Other when Other < Remaining -> %% we must be shorter than the master - false = sets:is_element(Guid, Fakes), %% ASSERTION State end}; -process_instruction(_Flush, {ack, Guids}, +process_instruction({ack, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, - guid_ack = GA, - fakes = Fakes }) -> + guid_ack = GA }) -> {AckTags, GA1} = guids_to_acktags(Guids, GA), {Guids1, BQS1} = BQ:ack(AckTags, BQS), [] = Guids1 -- Guids, %% ASSERTION - Fakes1 = sets:difference(Fakes, 
sets:from_list(Guids)), {processed, State #state { guid_ack = GA1, - backing_queue_state = BQS1, - fakes = Fakes1 }}; -process_instruction(_Flush, {requeue, MsgPropsFun, Guids}, + backing_queue_state = BQS1 }}; +process_instruction({requeue, MsgPropsFun, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA }) -> @@ -559,7 +515,7 @@ process_instruction(_Flush, {requeue, MsgPropsFun, Guids}, State #state { guid_ack = dict:new(), backing_queue_state = BQS2 } end}; -process_instruction(_Flush, delete_and_terminate, +process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:delete_and_terminate(BQS), @@ -578,14 +534,3 @@ guids_to_acktags(Guids, GA) -> ack_all(BQ, GA, BQS) -> BQ:ack([AckTag || {_Guid, AckTag} <- dict:to_list(GA)], BQS). - -fake_delivery(#amqqueue { name = QueueName }, Guid, ChPid) -> - ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>), - Msg = (rabbit_basic:message(ExchangeName, <<>>, #'P_basic'{}, <<>>)) - #basic_message { guid = Guid }, - #delivery { mandatory = false, - immediate = false, - txn = none, - sender = ChPid, - message = Msg, - msg_seq_no = undefined }. -- cgit v1.2.1 From 43236b06f80d58380a109ac2d2c8325bf8385004 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 20 Dec 2010 15:48:31 +0000 Subject: Give in and have the master put the pub msgs themselves on the gm. Avoiding this proves far too complex in all the failure cases (the worst being when the publishing node crashes - the master can receive the msg, but not the slaves. Worse, because of complexities like delegates, it's not even straightforward to monitor the publishers in order to be sure we're not going to receive more messages from them). We continue to have all msgs directly routed to all queues. Yes, this means that normally every slave receives every message twice, but this is genuinely the simplest and most secure route and protects against failures the best. --- src/rabbit_mirror_queue_master.erl | 62 ++++++---- src/rabbit_mirror_queue_slave.erl | 246 ++++++++++++++++++------------------- 2 files changed, 159 insertions(+), 149 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 0d64ab8e..94e93b3e 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/4]). +-export([promote_backing_queue_state/5]). -behaviour(rabbit_backing_queue). @@ -36,7 +36,8 @@ coordinator, backing_queue, backing_queue_state, - set_delivered + set_delivered, + seen }). %% --------------------------------------------------------------------------- @@ -64,14 +65,16 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = 0 }. + set_delivered = 0, + seen = sets:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, Seen) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = BQ:len(BQS) }. + set_delivered = BQ:len(BQS), + seen = Seen }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but @@ -94,22 +97,31 @@ purge(State = #state { gm = GM, {Count, State #state { backing_queue_state = BQS1, set_delivered = 0 }}. 
-publish(Msg = #basic_message { guid = Guid }, - MsgProps, ChPid, State = #state { gm = GM, +publish(Msg = #basic_message { guid = Guid }, MsgProps, ChPid, + State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + seen = Seen }) -> + case sets:is_element(Guid, Seen) of + true -> State #state { seen = sets:del_element(Guid, Seen) }; + false -> ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + State #state { backing_queue_state = BQS1 } + end. + +publish_delivered(AckRequired, Msg = #basic_message { guid = Guid }, MsgProps, + ChPid, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {publish, false, Guid, MsgProps, ChPid}), - BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State #state { backing_queue_state = BQS1 }. - -publish_delivered(AckRequired, Msg = #basic_message { guid = Guid }, - MsgProps, ChPid, - State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {publish, {true, AckRequired}, Guid, MsgProps, ChPid}), - {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - {AckTag, State #state { backing_queue_state = BQS1 }}. + backing_queue_state = BQS, + seen = Seen }) -> + case sets:is_element(Guid, Seen) of + true -> State #state { seen = sets:del_element(Guid, Seen) }; + false -> ok = gm:broadcast(GM, {publish, {true, AckRequired}, ChPid, + MsgProps, Msg}), + {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, + MsgProps, ChPid, BQS), + {AckTag, State #state { backing_queue_state = BQS1 }} + end. dropwhile(Fun, State = #state { gm = GM, backing_queue = BQ, @@ -126,7 +138,8 @@ dropwhile(Fun, State = #state { gm = GM, fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = SetDelivered }) -> + set_delivered = SetDelivered, + seen = Seen }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), State1 = State #state { backing_queue_state = BQS1 }, case Result of @@ -137,8 +150,13 @@ fetch(AckRequired, State = #state { gm = GM, ok = gm:broadcast(GM, {fetch, AckRequired, Guid, Remaining}), IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), + Seen1 = case SetDelivered + SetDelivered1 of + 1 -> sets:new(); %% transition to empty + _ -> Seen + end, {{Message, IsDelivered1, AckTag, Remaining}, - State1 #state { set_delivered = SetDelivered1 }} + State1 #state { set_delivered = SetDelivered1, + seen = Seen1 }} end. ack(AckTags, State = #state { gm = GM, diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index f124bc9e..deb1cc66 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -86,7 +86,7 @@ sender_queues, %% :: Pid -> MsgQ guid_ack, %% :: Guid -> AckTag - instructions, %% :: InstrQ + seen, %% Set Guid guid_to_channel %% for confirms }). 
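%% Illustrative sketch, not part of the patch: with this commit a slave
%% normally receives every message twice -- once directly from the channel,
%% like any other queue, and once via the master's gm broadcast -- and the
%% guid is what pairs the two copies up. The 'seen' set records guids whose
%% gm copy was handled before the direct copy arrived (or, on a newly
%% promoted master, guids already published while it was still a slave), so
%% the late copy is dropped rather than published twice. The master-side
%% rule, with stand-in funs for the gm broadcast and the backing-queue
%% publish, is roughly:
publish_once(Guid, Seen, BroadcastFun, PublishFun) ->
    case sets:is_element(Guid, Seen) of
        true  -> sets:del_element(Guid, Seen); %% direct copy arrived late: drop
        false -> ok = BroadcastFun(Guid),
                 ok = PublishFun(Guid),
                 Seen
    end.
%% The slave side (maybe_enqueue_message and the publish instruction
%% below) additionally queues direct deliveries per sending channel, so in
%% the common case the set is not touched at all.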
@@ -140,7 +140,7 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), guid_ack = dict:new(), - instructions = queue:new(), + seen = sets:new(), guid_to_channel = dict:new() }, hibernate, @@ -153,12 +153,12 @@ init([#amqqueue { name = QueueName } = Q]) -> handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> %% Synchronous, "immediate" delivery mode gen_server2:reply(From, false), %% master may deliver it, not us - handle_process_result(enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, State)); handle_call({deliver, Delivery = #delivery {}}, From, State) -> %% Synchronous, "mandatory" delivery mode gen_server2:reply(From, true), %% amqqueue throws away the result anyway - handle_process_result(enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, State)); handle_call({gm_deaths, Deaths}, From, State = #state { q = #amqqueue { name = QueueName }, @@ -187,16 +187,12 @@ handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> noreply(maybe_run_queue_via_backing_queue(Fun, State)); -handle_cast({gm, Instruction}, State = #state { instructions = InstrQ }) -> - State1 = State #state { instructions = queue:in(Instruction, InstrQ) }, - case queue:is_empty(InstrQ) of - true -> handle_process_result(process_instructions(State1)); - false -> noreply(State1) - end; +handle_cast({gm, Instruction}, State) -> + handle_process_result(process_instruction(Instruction, State)); handle_cast({deliver, Delivery = #delivery {}}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. - handle_process_result(enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, State)); handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), @@ -317,8 +313,8 @@ maybe_confirm_message(Guid, GTC) -> GTC end. -handle_process_result({continue, State}) -> noreply(State); -handle_process_result({stop, State}) -> {stop, normal, State}. +handle_process_result({ok, State}) -> noreply(State); +handle_process_result({stop, State}) -> {stop, normal, State}. promote_me(From, #state { q = Q, gm = GM, @@ -326,6 +322,7 @@ promote_me(From, #state { q = Q, backing_queue_state = BQS, rate_timer_ref = RateTRef, sender_queues = SQ, + seen = Seen, guid_ack = GA }) -> rabbit_log:info("Promoting slave ~p for queue ~p~n", [self(), Q #amqqueue.name]), @@ -334,7 +331,7 @@ promote_me(From, #state { q = Q, gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM), + CPid, BQ, BQS, GM, Seen), %% We have to do the requeue via this init because otherwise we %% don't have access to the relevent MsgPropsFun. Also, we are %% already in mnesia as the master queue pid. Thus we cannot just @@ -378,115 +375,111 @@ stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> {ok, cancel} = timer:cancel(TRef), State #state { rate_timer_ref = undefined }. -enqueue_message(Delivery = #delivery { sender = ChPid }, - State = #state { sender_queues = SQ }) -> - Q = case dict:find(ChPid, SQ) of - {ok, Q1} -> Q1; - error -> queue:new() - end, - SQ1 = dict:store(ChPid, queue:in(Delivery, Q), SQ), - State1 = State #state { sender_queues = SQ1 }, - case queue:is_empty(Q) of - true -> process_instructions(State1); - false -> {continue, State1} - end. 
- -process_instructions(State = #state { instructions = InstrQ }) -> - case queue:out(InstrQ) of - {empty, _InstrQ} -> - {continue, State}; - {{value, Instr}, InstrQ1} -> - case process_instruction(Instr, State) of - {processed, State1} -> - process_instructions( - State1 #state { instructions = InstrQ1 }); - {stop, State1} -> - {stop, State1 #state { instructions = InstrQ1 }}; - blocked -> - {continue, State} - end +maybe_enqueue_message( + Delivery = #delivery { message = #basic_message { guid = Guid }, + sender = ChPid }, + State = #state { q = Q, + sender_queues = SQ, + seen = Seen, + guid_to_channel = GTC }) -> + case sets:is_element(Guid, Seen) of + true -> + GTC1 = record_confirm_or_confirm(Delivery, Q, GTC), + State #state { guid_to_channel = GTC1, + seen = sets:del_element(Guid, Seen) }; + false -> + MQ = case dict:find(ChPid, SQ) of + {ok, MQ1} -> MQ1; + error -> queue:new() + end, + SQ1 = dict:store(ChPid, queue:in(Delivery, MQ), SQ), + State #state { sender_queues = SQ1 } end. -process_instruction({publish, Deliver, Guid, MsgProps, ChPid}, - State = #state { q = Q, - sender_queues = SQ, - backing_queue = BQ, - backing_queue_state = BQS, - guid_ack = GA, - guid_to_channel = GTC }) -> - case dict:find(ChPid, SQ) of - error -> - blocked; - {ok, MQ} -> - case queue:out(MQ) of - {empty, _MQ} -> - blocked; - {{value, Delivery = #delivery { - message = Msg = #basic_message { guid = Guid } }}, - MQ1} -> - State1 = State #state { sender_queues = - dict:store(ChPid, MQ1, SQ) }, - GTC1 = record_confirm_or_confirm(Delivery, Q, GTC), - {processed, - case Deliver of - false -> - BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State1 #state { backing_queue_state = BQS1, - guid_to_channel = GTC1 }; - {true, AckRequired} -> - {AckTag, BQS1} = BQ:publish_delivered( - AckRequired, Msg, MsgProps, - ChPid, BQS), - {GA1, GTC2} = - case AckRequired of - true -> - {dict:store(Guid, AckTag, GA), GTC1}; - false -> - {GA, maybe_confirm_message(Guid, GTC1)} - end, - State1 #state { backing_queue_state = BQS1, - guid_ack = GA1, - guid_to_channel = GTC2 } - end}; - {{value, #delivery {}}, _MQ1} -> - %% throw away the instruction: we'll never receive - %% the message to which it corresponds. - {processed, State} - end - end; +process_instruction( + {publish, Deliver, ChPid, MsgProps, Msg = #basic_message { guid = Guid }}, + State = #state { q = Q, + sender_queues = SQ, + backing_queue = BQ, + backing_queue_state = BQS, + guid_ack = GA, + seen = Seen, + guid_to_channel = GTC }) -> + {SQ1, Seen1, GTC1} = + case dict:find(ChPid, SQ) of + error -> + {SQ, sets:add_element(Guid, Seen), GTC}; + {ok, MQ} -> + case queue:out(MQ) of + {empty, _MQ} -> + {SQ, sets:add_element(Guid, Seen), GTC}; + {{value, Delivery = #delivery { + message = #basic_message { guid = Guid } }}, + MQ1} -> + GTC2 = record_confirm_or_confirm(Delivery, Q, GTC), + {dict:store(ChPid, MQ1, SQ), Seen, GTC2}; + {{value, #delivery {}}, _MQ1} -> + %% The instruction was sent to us before we + %% were within the mirror_pids within the + %% amqqueue record. We'll never receive the + %% message directly. 
+ {SQ, Seen, GTC} + end + end, + State1 = State #state { sender_queues = SQ1, + seen = Seen1, + guid_to_channel = GTC1 }, + {ok, + case Deliver of + false -> + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + State1 #state { backing_queue_state = BQS1 }; + {true, AckRequired} -> + {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, + ChPid, BQS), + {GA1, GTC3} = case AckRequired of + true -> {dict:store(Guid, AckTag, GA), GTC1}; + false -> {GA, maybe_confirm_message(Guid, GTC1)} + end, + State1 #state { backing_queue_state = BQS1, + guid_ack = GA1, + guid_to_channel = GTC3 } + end}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> QLen = BQ:len(BQS), ToDrop = QLen - Length, - {processed, - case ToDrop > 0 of - true -> BQS1 = lists:foldl( - fun (const, BQSN) -> BQ:fetch(false, BQSN) end, - BQS, lists:duplicate(ToDrop, const)), - State #state { backing_queue_state = BQS1 }; - false -> State - end}; + {ok, case ToDrop > 0 of + true -> BQS1 = + lists:foldl( + fun (const, BQSN) -> + {{_Msg, _IsDelivered, _AckTag, _Remaining}, + BQSN1} = BQ:fetch(false, BQSN), + BQSN1 + end, BQS, lists:duplicate(ToDrop, const)), + State #state { backing_queue_state = BQS1 }; + false -> State + end}; process_instruction({fetch, AckRequired, Guid, Remaining}, State = #state { backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA }) -> QLen = BQ:len(BQS), - {processed, - case QLen - 1 of - Remaining -> - {{_Msg, _IsDelivered, AckTag, Remaining}, BQS1} = - BQ:fetch(AckRequired, BQS), - GA1 = case AckRequired of - true -> dict:store(Guid, AckTag, GA); - false -> GA - end, - State #state { backing_queue_state = BQS1, - guid_ack = GA1 }; - Other when Other < Remaining -> - %% we must be shorter than the master - State - end}; + {ok, case QLen - 1 of + Remaining -> + {{_Msg, _IsDelivered, AckTag, Remaining}, BQS1} = + BQ:fetch(AckRequired, BQS), + GA1 = case AckRequired of + true -> dict:store(Guid, AckTag, GA); + false -> GA + end, + State #state { backing_queue_state = BQS1, + guid_ack = GA1 }; + Other when Other < Remaining -> + %% we must be shorter than the master + State + end}; process_instruction({ack, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, @@ -494,27 +487,26 @@ process_instruction({ack, Guids}, {AckTags, GA1} = guids_to_acktags(Guids, GA), {Guids1, BQS1} = BQ:ack(AckTags, BQS), [] = Guids1 -- Guids, %% ASSERTION - {processed, State #state { guid_ack = GA1, - backing_queue_state = BQS1 }}; + {ok, State #state { guid_ack = GA1, + backing_queue_state = BQS1 }}; process_instruction({requeue, MsgPropsFun, Guids}, State = #state { backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA }) -> {AckTags, GA1} = guids_to_acktags(Guids, GA), - {processed, - case length(AckTags) =:= length(Guids) of - true -> - {Guids, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), - State #state { guid_ack = GA1, - backing_queue_state = BQS1 }; - false -> - %% the only thing we can safely do is nuke out our BQ and - %% GA - {_Count, BQS1} = BQ:purge(BQS), - {Guids, BQS2} = ack_all(BQ, GA, BQS1), - State #state { guid_ack = dict:new(), - backing_queue_state = BQS2 } - end}; + {ok, case length(AckTags) =:= length(Guids) of + true -> + {Guids, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), + State #state { guid_ack = GA1, + backing_queue_state = BQS1 }; + false -> + %% the only thing we can safely do is nuke out our BQ + %% and GA + {_Count, BQS1} = BQ:purge(BQS), + {Guids, BQS2} = ack_all(BQ, GA, BQS1), + State #state { 
guid_ack = dict:new(), + backing_queue_state = BQS2 } + end}; process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> -- cgit v1.2.1 From 51d312ef31f3ec74e16750f1c206b6d36933c7be Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 20 Dec 2010 16:17:04 +0000 Subject: Remove R14-ism. Also, cute feature - if you specify no nodes by name, you mean all known nodes --- src/rabbit_mirror_queue_master.erl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 94e93b3e..4f1bcc9d 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -57,8 +57,12 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), - [rabbit_mirror_queue_coordinator:add_slave(CPid, binary_to_atom(Node, utf8)) - || {longstr, Node} <- Nodes], + Nodes1 = case Nodes of + [] -> nodes(); + _ -> [list_to_atom(binary_to_list(Node)) || + {longstr, Node} <- Nodes] + end, + [rabbit_mirror_queue_coordinator:add_slave(CPid, Node) || Node <- Nodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover), #state { gm = GM, -- cgit v1.2.1 From a1a8b18d66376ef14b55ae1ff2b4b8736d97c623 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Dec 2010 13:23:06 +0000 Subject: Correct all manner of crazy wrongness with maybe_run_queue_via_backing_queue. The principle problem is that the state with which the function is to be run is dependent on the caller. Sometimes it's the 'top level' BQ state (eg invocations called by amqqueue_process). Sometimes it's the 'bottom level' state (eg invocations called by vq or qi or msg_store). We didn't used to have multiple layers, but now we do. Consequently, the invocation must indicate 'where' the fun is to be run. The clearest explanation is in master:invoke/3 --- include/rabbit_backing_queue_spec.hrl | 2 ++ src/rabbit_amqqueue.erl | 21 +++++++------- src/rabbit_amqqueue_process.erl | 38 +++++++++++++------------ src/rabbit_backing_queue.erl | 7 ++++- src/rabbit_mirror_queue_master.erl | 9 +++++- src/rabbit_mirror_queue_slave.erl | 15 +++++----- src/rabbit_variable_queue.erl | 52 +++++++++++++++++++---------------- 7 files changed, 84 insertions(+), 60 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index a330fe1e..fdb144d6 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -78,3 +78,5 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). +-spec(invoke/3 :: (atom(), fun ((A) -> A), state()) -> + {[rabbit_guid:guid()], state()}). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 52a41e71..0abe1198 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -33,8 +33,8 @@ -export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). -export([internal_declare/2, internal_delete/1, - maybe_run_queue_via_backing_queue/2, - maybe_run_queue_via_backing_queue_async/2, + maybe_run_queue_via_backing_queue/3, + maybe_run_queue_via_backing_queue_async/3, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1]). 
-export([pseudo_queue/2]). @@ -151,10 +151,10 @@ -spec(internal_delete/1 :: (name()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit()). --spec(maybe_run_queue_via_backing_queue/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). --spec(maybe_run_queue_via_backing_queue_async/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). +-spec(maybe_run_queue_via_backing_queue/3 :: + (pid(), atom(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). +-spec(maybe_run_queue_via_backing_queue_async/3 :: + (pid(), atom(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). -spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). @@ -459,11 +459,12 @@ internal_delete(QueueName) -> Deletions -> ok = rabbit_binding:process_deletions(Deletions) end. -maybe_run_queue_via_backing_queue(QPid, Fun) -> - gen_server2:call(QPid, {maybe_run_queue_via_backing_queue, Fun}, infinity). +maybe_run_queue_via_backing_queue(QPid, Mod, Fun) -> + gen_server2:call(QPid, {maybe_run_queue_via_backing_queue, Mod, Fun}, + infinity). -maybe_run_queue_via_backing_queue_async(QPid, Fun) -> - gen_server2:cast(QPid, {maybe_run_queue_via_backing_queue, Fun}). +maybe_run_queue_via_backing_queue_async(QPid, Mod, Fun) -> + gen_server2:cast(QPid, {maybe_run_queue_via_backing_queue, Mod, Fun}). update_ram_duration(QPid) -> gen_server2:cast(QPid, update_ram_duration). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 516f7b00..fedfd03a 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -266,7 +266,7 @@ ensure_sync_timer(State = #q{sync_timer_ref = undefined, backing_queue = BQ}) -> {ok, TRef} = timer:apply_after( ?SYNC_INTERVAL, rabbit_amqqueue, maybe_run_queue_via_backing_queue, - [self(), fun (BQS) -> {[], BQ:idle_timeout(BQS)} end]), + [self(), BQ, fun (BQS) -> {[], BQ:idle_timeout(BQS)} end]), State#q{sync_timer_ref = TRef}; ensure_sync_timer(State) -> State. @@ -559,11 +559,11 @@ deliver_or_enqueue(Delivery, State) -> requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl = TTL}) -> maybe_run_queue_via_backing_queue( - fun (BQS) -> - {_Guids, BQS1} = - BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS), - {[], BQS1} - end, State). + BQ, fun (BQS) -> + {_Guids, BQS1} = + BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS), + {[], BQS1} + end, State). fetch(AckRequired, State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> @@ -665,8 +665,10 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. -maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - {Guids, BQS1} = Fun(BQS), +maybe_run_queue_via_backing_queue(Mod, Fun, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {Guids, BQS1} = BQ:invoke(Mod, Fun, BQS), run_message_queue( confirm_messages(Guids, State#q{backing_queue_state = BQS1})). @@ -805,11 +807,11 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {maybe_run_queue_via_backing_queue, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {maybe_run_queue_via_backing_queue, _Mod, _Fun} -> 6; + _ -> 0 end. 
prioritise_cast(Msg, _State) -> @@ -1040,12 +1042,12 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> - reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). +handle_call({maybe_run_queue_via_backing_queue, Mod, Fun}, _From, State) -> + reply(ok, maybe_run_queue_via_backing_queue(Mod, Fun, State)). -handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> - noreply(maybe_run_queue_via_backing_queue(Fun, State)); +handle_cast({maybe_run_queue_via_backing_queue, Mod, Fun}, State) -> + noreply(maybe_run_queue_via_backing_queue(Mod, Fun, State)); handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. @@ -1175,7 +1177,7 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) -> handle_info(timeout, State = #q{backing_queue = BQ}) -> noreply(maybe_run_queue_via_backing_queue( - fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State)); + BQ, fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State)); handle_info({'EXIT', _Pid, Reason}, State) -> {stop, Reason, State}; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 7a728498..ffa6982a 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -137,7 +137,12 @@ behaviour_info(callbacks) -> %% Exists for debugging purposes, to be able to expose state via %% rabbitmqctl list_queues backing_queue_status - {status, 1} + {status, 1}, + + %% Passed a function to be invoked with the relevant backing + %% queue's state. Useful for when the backing queue or other + %% components need to pass functions into the backing queue. + {invoke, 3} ]; behaviour_info(_Other) -> undefined. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 4f1bcc9d..11831a29 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1]). + status/1, invoke/3]). -export([start/1, stop/0]). @@ -241,3 +241,10 @@ handle_pre_hibernate(State = #state { backing_queue = BQ, status(#state { backing_queue = BQ, backing_queue_state = BQS}) -> BQ:status(BQS). + +invoke(?MODULE, Fun, State) -> + Fun(State); +invoke(Mod, Fun, State = #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + {Guids, BQS1} = BQ:invoke(Mod, Fun, BQS), + {Guids, State #state { backing_queue_state = BQS1 }}. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index deb1cc66..a58e1579 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -180,12 +180,12 @@ handle_call({gm_deaths, Deaths}, From, {stop, normal, State} end; -handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> - reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). +handle_call({maybe_run_queue_via_backing_queue, Mod, Fun}, _From, State) -> + reply(ok, maybe_run_queue_via_backing_queue(Mod, Fun, State)). 
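Note on the Mod argument threaded through maybe_run_queue_via_backing_queue/3 above: the fun is now addressed to a particular backing-queue layer. rabbit_mirror_queue_master:invoke/3 runs the fun when it is addressed to the master and otherwise forwards it to the backing queue it wraps, while rabbit_variable_queue:invoke/3 only accepts funs addressed to itself, so callbacks from the msg_store or queue index reach the bottom-level state no matter how many layers are stacked on top. A hedged usage sketch; the wrapper function below is hypothetical and not part of the patch:

    %% Ask queue QPid to run Fun against the bottom-level
    %% rabbit_variable_queue state, whatever intermediate layers
    %% (e.g. the mirror-queue master) are in between.  The fun handed
    %% to the queue process must return {GuidsToConfirm, NewState}.
    run_on_vq_async(QPid, Fun) ->
        rabbit_amqqueue:maybe_run_queue_via_backing_queue_async(
          QPid, rabbit_variable_queue,
          fun (VQState) -> {[], Fun(VQState)} end).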
-handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> - noreply(maybe_run_queue_via_backing_queue(Fun, State)); +handle_cast({maybe_run_queue_via_backing_queue, Mod, Fun}, State) -> + noreply(maybe_run_queue_via_backing_queue(Mod, Fun, State)); handle_cast({gm, Instruction}, State) -> handle_process_result(process_instruction(Instruction, State)); @@ -284,9 +284,10 @@ handle_msg([SPid], _From, Msg) -> %% --------------------------------------------------------------------------- maybe_run_queue_via_backing_queue( - Fun, State = #state { backing_queue_state = BQS, - guid_to_channel = GTC }) -> - {Guids, BQS1} = Fun(BQS), + Mod, Fun, State = #state { backing_queue = BQ, + backing_queue_state = BQS, + guid_to_channel = GTC }) -> + {Guids, BQS1} = BQ:invoke(Mod, Fun, BQS), GTC1 = lists:foldl(fun maybe_confirm_message/2, GTC, Guids), State #state { backing_queue_state = BQS1, guid_to_channel = GTC1 }. diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 09ead22b..ba77d185 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -37,7 +37,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1]). + status/1, invoke/3]). -export([start/1, stop/0]). @@ -865,6 +865,9 @@ status(#vqstate { {avg_ack_ingress_rate, AvgAckIngressRate}, {avg_ack_egress_rate , AvgAckEgressRate} ]. +invoke(?MODULE, Fun, State) -> + Fun(State). + %%---------------------------------------------------------------------------- %% Minor helpers %%---------------------------------------------------------------------------- @@ -1100,10 +1103,11 @@ blank_rate(Timestamp, IngressLength) -> msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> Self = self(), F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( - Self, fun (StateN) -> {[], tx_commit_post_msg_store( - true, Pubs, AckTags, - Fun, MsgPropsFun, StateN)} - end) + Self, ?MODULE, + fun (StateN) -> {[], tx_commit_post_msg_store( + true, Pubs, AckTags, + Fun, MsgPropsFun, StateN)} + end) end, fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( fun () -> remove_persistent_messages( @@ -1409,27 +1413,29 @@ msgs_confirmed(GuidSet, State) -> msgs_written_to_disk(QPid, GuidSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), - State #vqstate { - msgs_on_disk = - gb_sets:intersection( - gb_sets:union(MOD, GuidSet), UC) }) - end). + QPid, ?MODULE, + fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:intersection( + gb_sets:union(MOD, GuidSet), UC) }) + end). msg_indices_written_to_disk(QPid, GuidSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), - State #vqstate { - msg_indices_on_disk = - gb_sets:intersection( - gb_sets:union(MIOD, GuidSet), UC) }) - end). 
+ QPid, ?MODULE, + fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:intersection( + gb_sets:union(MIOD, GuidSet), UC) }) + end). %%---------------------------------------------------------------------------- %% Phase changes -- cgit v1.2.1 From 96529337fddf02316bdbd78ad3d1c6f152972ca5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Dec 2010 13:31:42 +0000 Subject: Use amqqueue:store_queue/1 so that we modify durable queues durably --- src/rabbit_amqqueue.erl | 1 + src/rabbit_mirror_queue_misc.erl | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 0abe1198..f04c5fec 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -48,6 +48,7 @@ -export([notify_sent/2, unblock/2, flush_all/2]). -export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). -export([on_node_down/1]). +-export([store_queue/1]). -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 05602076..090cb812 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -39,7 +39,7 @@ remove_from_queue(QueueName, DeadPids) -> _ -> Q1 = Q #amqqueue { pid = QPid1, mirror_pids = MPids1 }, - mnesia:write(rabbit_queue, Q1, write), + ok = rabbit_amqqueue:store_queue(Q1), {ok, QPid1} end end -- cgit v1.2.1 From e8915154deb02dfb64c2b71a336af39c44d835d0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Dec 2010 13:36:43 +0000 Subject: idiot --- src/rabbit_mirror_queue_slave.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index a58e1579..a61cea0d 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -266,7 +266,7 @@ members_changed([SPid], _Births, Deaths) -> rabbit_misc:with_exit_handler( fun () -> {stop, normal} end, fun () -> - case gen_server2:call(SPid, {gm_deaths, Deaths}) of + case gen_server2:call(SPid, {gm_deaths, Deaths}, infinity) of ok -> ok; {promote, CPid} -> -- cgit v1.2.1 From e32b21bc5a8fd1a167e540f09b9d92bcd87056dc Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Dec 2010 15:40:11 +0000 Subject: Make the addition of slave nodes more robust. This is especially important for recovery of durable queues --- src/rabbit_mirror_queue_coordinator.erl | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 6303952d..608148b5 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -70,9 +70,17 @@ handle_call(get_gm, _From, State = #state { gm = GM }) -> reply(GM, State). 
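Note on the "Use amqqueue:store_queue/1" patch above: writing the promoted master pid only to the transient rabbit_queue table would lose it for durable queues across a broker restart, which is why remove_from_queue/2 now goes through rabbit_amqqueue:store_queue/1. That function's body is not part of this patch series; the sketch below is an assumption about its shape, shown only to make the commit message concrete:

    %% Assumed shape of rabbit_amqqueue:store_queue/1: durable queues
    %% are written to both the durable and the transient table inside
    %% the caller's mnesia transaction; transient queues only to the
    %% transient table.
    store_queue(Q = #amqqueue{durable = true}) ->
        ok = mnesia:write(rabbit_durable_queue, Q, write),
        ok = mnesia:write(rabbit_queue, Q, write);
    store_queue(Q = #amqqueue{durable = false}) ->
        ok = mnesia:write(rabbit_queue, Q, write).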
handle_cast({add_slave, Node}, State = #state { q = Q }) -> - Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), - rabbit_log:info("Adding slave node for queue ~p: ~p~n", - [Q #amqqueue.name, Result]), + Nodes = nodes(), + case lists:member(Node, Nodes) of + true -> + Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), + rabbit_log:info("Adding slave node for queue ~p: ~p~n", + [Q #amqqueue.name, Result]); + false -> + rabbit_log:info( + "Ignoring request to add slave on node ~p for queue ~p~n", + [Q #amqqueue.name, Node]) + end, noreply(State); handle_cast({gm_deaths, Deaths}, -- cgit v1.2.1 From fe3a8699396d5ea3d9e4d0f67ab411adbf9a24d5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 13:39:08 +0000 Subject: Sketch of how clustered upgrades might work. --- src/rabbit_mnesia.erl | 81 ++++++++++++++++++++++++++++------------ src/rabbit_queue_index.erl | 2 +- src/rabbit_upgrade.erl | 52 ++++++++++++++++---------- src/rabbit_upgrade_functions.erl | 33 ++++++++++++++-- 4 files changed, 120 insertions(+), 48 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 11f5e410..2550bdd4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1]). + forget_other_nodes/0, empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). @@ -66,6 +66,7 @@ -spec(is_clustered/0 :: () -> boolean()). -spec(running_clustered_nodes/0 :: () -> [node()]). -spec(all_clustered_nodes/0 :: () -> [node()]). +-spec(forget_other_nodes/0 :: () -> 'ok'). -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). @@ -126,8 +127,8 @@ cluster(ClusterNodes, Force) -> %% return node to its virgin state, where it is not member of any %% cluster, has no cluster configuration, no local database, and no %% persisted messages -reset() -> reset(false). -force_reset() -> reset(true). +reset() -> reset(all). +force_reset() -> reset(force_all). is_clustered() -> RunningNodes = running_clustered_nodes(), @@ -139,6 +140,10 @@ all_clustered_nodes() -> running_clustered_nodes() -> mnesia:system_info(running_db_nodes). +forget_other_nodes() -> + Nodes = all_clustered_nodes() -- [node()], + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes]. + empty_ram_only_tables() -> Node = node(), lists:foreach( @@ -385,32 +390,54 @@ init_db(ClusterNodes, Force) -> {[], true, [_]} -> %% True single disc node, attempt upgrade ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade() of + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> %% "Master" (i.e. 
without config) disc node in cluster, - %% verify schema + %% do upgrade ok = wait_for_tables(), - ensure_version_ok(rabbit_upgrade:read_version()), - ensure_schema_ok(); + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; {[], false, _} -> %% Nothing there at all, start from scratch ok = create_schema(); {[AnotherNode|_], _, _} -> %% Subsequent node in cluster, catch up - ensure_version_ok(rabbit_upgrade:read_version()), - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), + case IsDiskNode of + true -> + %% TODO test this branch ;) + %% TODO don't just reset every time we start up! + mnesia:stop(), + reset(mnesia), + mnesia:start(), + %% TODO what should we ensure? + %% ensure_version_ok(rabbit_upgrade:read_version()), + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + %% TODO needed? + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(disc); + false -> + ok = wait_for_replicated_tables(), + %% TODO can we live without this on disc? + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(ram), + case rabbit_upgrade:maybe_upgrade([local]) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end + end, ensure_schema_ok() end; {error, Reason} -> @@ -563,12 +590,15 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. -reset(Force) -> +%% Mode: force_all - get rid of everything unconditionally +%% all - get rid of everything, conditional on Mnesia working +%% mnesia - just get rid of Mnesia, leave everything else +reset(Mode) -> ok = ensure_mnesia_not_running(), Node = node(), - case Force of - true -> ok; - false -> + case Mode of + force_all -> ok; + _ -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = @@ -583,9 +613,14 @@ reset(Force) -> rabbit_misc:ensure_ok(mnesia:delete_schema([Node]), cannot_delete_schema) end, - ok = delete_cluster_nodes_config(), - %% remove persisted messages and any other garbage we find - ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")), + case Mode of + mnesia -> + ok; + _ -> + ok = delete_cluster_nodes_config(), + %% remove persisted messages and any other garbage we find + ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")) + end, ok. leave_cluster([], _) -> ok; diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76c0a4ef..6adcd8b0 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -182,7 +182,7 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({add_queue_ttl, []}). +-rabbit_upgrade({add_queue_ttl, local, []}). -ifdef(use_specs). diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 97a07514..dee08f48 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/0, read_version/0, write_version/0, desired_version/0]). 
+-export([maybe_upgrade/1, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -33,9 +33,10 @@ -ifdef(use_specs). -type(step() :: atom()). +-type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/0 :: () -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -47,24 +48,28 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade() -> +maybe_upgrade(Scopes) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> case upgrades_to_apply(CurrentHeads, G) of - [] -> ok; - Upgrades -> apply_upgrades(Upgrades) - end; - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, G) end); {error, enoent} -> version_not_available end. +maybe_upgrade_graph(CurrentHeads, Scopes, G) -> + case unknown_heads(CurrentHeads, G) of + [] -> + case upgrades_to_apply(CurrentHeads, Scopes, G) of + [] -> + ok; + Upgrades -> + apply_upgrades(Upgrades, lists:member(mnesia, Scopes)) + end; + Unknown -> + throw({error, {future_upgrades_found, Unknown}}) + end. + read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [Heads]} -> {ok, Heads}; @@ -98,16 +103,17 @@ with_upgrade_graph(Fun) -> end. vertices(Module, Steps) -> - [{StepName, {Module, StepName}} || {StepName, _Reqs} <- Steps]. + [{StepName, {Scope, {Module, StepName}}} || + {StepName, Scope, _Reqs} <- Steps]. edges(_Module, Steps) -> - [{Require, StepName} || {StepName, Requires} <- Steps, Require <- Requires]. - + [{Require, StepName} || {StepName, _Scope, Requires} <- Steps, + Require <- Requires]. unknown_heads(Heads, G) -> [H || H <- Heads, digraph:vertex(G, H) =:= false]. -upgrades_to_apply(Heads, G) -> +upgrades_to_apply(Heads, Scopes, G) -> %% Take all the vertices which can reach the known heads. That's %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. @@ -117,15 +123,17 @@ upgrades_to_apply(Heads, G) -> sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. - [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. + Sorted = [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))], + %% Only return the upgrades for the appropriate scopes + [Upgrade || {Scope, Upgrade} <- Sorted, lists:member(Scope, Scopes)]. heads(G) -> lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). %% ------------------------------------------------------------------- -apply_upgrades(Upgrades) -> +apply_upgrades(Upgrades, ForgetOthers) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -140,6 +148,10 @@ apply_upgrades(Upgrades) -> %% is not intuitive. Remove it. 
ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + case ForgetOthers of + true -> rabbit_mnesia:forget_other_nodes(); + _ -> ok + end, [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 7848c848..43e468ff 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -24,10 +24,14 @@ -compile([export_all]). --rabbit_upgrade({remove_user_scope, []}). --rabbit_upgrade({hash_passwords, []}). --rabbit_upgrade({add_ip_to_listener, []}). --rabbit_upgrade({internal_exchanges, []}). +-rabbit_upgrade({remove_user_scope, mnesia, []}). +-rabbit_upgrade({hash_passwords, mnesia, []}). +-rabbit_upgrade({add_ip_to_listener, mnesia, []}). +-rabbit_upgrade({internal_exchanges, mnesia, []}). + +-rabbit_upgrade({one, mnesia, []}). +-rabbit_upgrade({two, local, [one]}). +-rabbit_upgrade({three, mnesia, [two]}). %% ------------------------------------------------------------------- @@ -85,6 +89,27 @@ internal_exchanges() -> || T <- Tables ], ok. +one() -> + mnesia( + rabbit_user, + fun ({user, Username, Hash, IsAdmin}) -> + {user, Username, Hash, IsAdmin, foo} + end, + [username, password_hash, is_admin, extra]). + +two() -> + ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), + [test]). + +three() -> + mnesia( + rabbit_user, + fun ({user, Username, Hash, IsAdmin, _}) -> + {user, Username, Hash, IsAdmin} + end, + [username, password_hash, is_admin]). + + %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 0b093ecb559424e2b2c7809cba5dc2cbdfab710c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 14:18:51 +0000 Subject: These two cases are the same. --- src/rabbit_mnesia.erl | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2550bdd4..f1e007a1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -386,26 +386,19 @@ init_db(ClusterNodes, Force) -> end; true -> ok end, - case {Nodes, mnesia:system_info(use_dir), all_clustered_nodes()} of - {[], true, [_]} -> - %% True single disc node, attempt upgrade + case {Nodes, mnesia:system_info(use_dir)} of + {[], true} -> + %% True single disc node, or master" (i.e. without + %% config) disc node in cluster, attempt upgrade ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - {[], true, _} -> - %% "Master" (i.e. without config) disc node in cluster, - %% do upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - {[], false, _} -> + {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[AnotherNode|_], _, _} -> + {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), -- cgit v1.2.1 From 50a9fc4fb471d68225090f0b0fe39ead5110012b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:09:18 +0000 Subject: Make disc node reclustering work, various cleanups. 
--- src/rabbit_mnesia.erl | 94 +++++++++++++++++++++++--------------------------- src/rabbit_upgrade.erl | 24 ++++++------- 2 files changed, 53 insertions(+), 65 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f1e007a1..e5929f86 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - forget_other_nodes/0, empty_ram_only_tables/0, copy_db/1]). + empty_ram_only_tables/0, copy_db/1]). -export([table_names/0]). @@ -66,7 +66,6 @@ -spec(is_clustered/0 :: () -> boolean()). -spec(running_clustered_nodes/0 :: () -> [node()]). -spec(all_clustered_nodes/0 :: () -> [node()]). --spec(forget_other_nodes/0 :: () -> 'ok'). -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). @@ -127,8 +126,8 @@ cluster(ClusterNodes, Force) -> %% return node to its virgin state, where it is not member of any %% cluster, has no cluster configuration, no local database, and no %% persisted messages -reset() -> reset(all). -force_reset() -> reset(force_all). +reset() -> reset(false). +force_reset() -> reset(true). is_clustered() -> RunningNodes = running_clustered_nodes(), @@ -388,10 +387,11 @@ init_db(ClusterNodes, Force) -> end, case {Nodes, mnesia:system_info(use_dir)} of {[], true} -> - %% True single disc node, or master" (i.e. without + %% True single disc node, or "master" (i.e. without %% config) disc node in cluster, attempt upgrade ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], fun forget_other_nodes/0) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -400,37 +400,27 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case IsDiskNode of - true -> - %% TODO test this branch ;) - %% TODO don't just reset every time we start up! - mnesia:stop(), - reset(mnesia), - mnesia:start(), - %% TODO what should we ensure? - %% ensure_version_ok(rabbit_upgrade:read_version()), - %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - %% TODO needed? - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(disc); - false -> - ok = wait_for_replicated_tables(), - %% TODO can we live without this on disc? - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(ram), - case rabbit_upgrade:maybe_upgrade([local]) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end + %% TODO what should we ensure? 
+ %% ensure_version_ok(rabbit_upgrade:read_version()), + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + Type = case ClusterNodes == [] orelse + lists:member(node(), ClusterNodes) of + true -> disc; + false -> ram + end, + case rabbit_upgrade:maybe_upgrade( + [local], reset_fun(ProperClusterNodes)) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(Type), ensure_schema_ok() end; {error, Reason} -> @@ -470,6 +460,16 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. +reset_fun(ProperClusterNodes) -> + fun() -> + mnesia:stop(), + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), + {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes) + end. + create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), @@ -583,15 +583,12 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. -%% Mode: force_all - get rid of everything unconditionally -%% all - get rid of everything, conditional on Mnesia working -%% mnesia - just get rid of Mnesia, leave everything else -reset(Mode) -> +reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), - case Mode of - force_all -> ok; - _ -> + case Force of + true -> ok; + false -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = @@ -606,14 +603,9 @@ reset(Mode) -> rabbit_misc:ensure_ok(mnesia:delete_schema([Node]), cannot_delete_schema) end, - case Mode of - mnesia -> - ok; - _ -> - ok = delete_cluster_nodes_config(), - %% remove persisted messages and any other garbage we find - ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")) - end, + ok = delete_cluster_nodes_config(), + %% remove persisted messages and any other garbage we find + ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")), ok. leave_cluster([], _) -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dee08f48..7e59faaf 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/1, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/2, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,7 +36,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +%% TODO update +%%-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -48,23 +49,21 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes) -> +maybe_upgrade(Scopes, Fun) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, G) end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) end); {error, enoent} -> version_not_available end. 
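Note on the scope element added to -rabbit_upgrade attributes in this series: each step is tagged either mnesia (a cluster-wide schema transformation, applied once) or local (a per-node transformation of on-disk data, applied on every node), and the upgrade code filters the topologically sorted steps by the scopes the calling node is responsible for. A hypothetical declaration under this scheme; the step name and its body are made up for illustration:

    %% A per-node step that must run after the cluster-wide
    %% 'internal_exchanges' step has been applied.
    -rabbit_upgrade({rewrite_local_index, local, [internal_exchanges]}).

    rewrite_local_index() ->
        %% local steps typically touch files under the node's own dir
        Marker = filename:join(rabbit_mnesia:dir(), "rewrite_local_index"),
        ok = rabbit_misc:write_term_file(Marker, [done]).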
-maybe_upgrade_graph(CurrentHeads, Scopes, G) -> +maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) -> case unknown_heads(CurrentHeads, G) of [] -> case upgrades_to_apply(CurrentHeads, Scopes, G) of - [] -> - ok; - Upgrades -> - apply_upgrades(Upgrades, lists:member(mnesia, Scopes)) + [] -> ok; + Upgrades -> apply_upgrades(Upgrades, Fun) end; Unknown -> throw({error, {future_upgrades_found, Unknown}}) @@ -133,7 +132,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, ForgetOthers) -> +apply_upgrades(Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -148,10 +147,7 @@ apply_upgrades(Upgrades, ForgetOthers) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - case ForgetOthers of - true -> rabbit_mnesia:forget_other_nodes(); - _ -> ok - end, + Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 34ca7a82d250748ea59d92aa499cb562c8332ae4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:12:07 +0000 Subject: Revert arbitrary difference from default. --- src/rabbit_mnesia.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e5929f86..d8086b56 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -404,11 +404,8 @@ init_db(ClusterNodes, Force) -> %% ensure_version_ok(rabbit_upgrade:read_version()), %% ensure_version_ok( %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - Type = case ClusterNodes == [] orelse - lists:member(node(), ClusterNodes) of - true -> disc; - false -> ram - end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( [local], reset_fun(ProperClusterNodes)) of ok -> @@ -420,7 +417,10 @@ init_db(ClusterNodes, Force) -> end, ok = wait_for_replicated_tables(), ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(Type), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), ensure_schema_ok() end; {error, Reason} -> -- cgit v1.2.1 From 8aaef521a855a3df1223e3b1abeafe204b1e58b6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:24:59 +0000 Subject: Fix spec --- src/rabbit_mnesia.erl | 6 ++++-- src/rabbit_upgrade.erl | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index d8086b56..11e9a178 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -141,7 +141,8 @@ running_clustered_nodes() -> forget_other_nodes() -> Nodes = all_clustered_nodes() -- [node()], - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes]. + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes], + ok. empty_ram_only_tables() -> Node = node(), @@ -467,7 +468,8 @@ reset_fun(ProperClusterNodes) -> cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes) + {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes), + ok end. 
create_schema() -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 7e59faaf..48c00d69 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -36,8 +36,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). -%% TODO update -%%-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/2 :: ([scope()], fun (() -> 'ok')) + -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -147,7 +147,7 @@ apply_upgrades(Upgrades, Fun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - Fun(), + ok = Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 00dd61ca4b2372d698225ea3e58a932bdd1baffc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 4 Jan 2011 16:34:23 +0000 Subject: Check our version matches the remote version. --- src/rabbit_mnesia.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 11e9a178..82e2a30e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -401,10 +401,8 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up - %% TODO what should we ensure? - %% ensure_version_ok(rabbit_upgrade:read_version()), - %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( -- cgit v1.2.1 From cec5a2c8548dcc6c7a7ad44c7b72361adca1fccb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 6 Jan 2011 17:50:21 +0000 Subject: Decide the node to do mnesia upgrades based on which was the last disc node to shut down. Blow up with a hopefully helpful error message if the "wrong" disc node is started first. This works; you can now upgrade a disc-only cluster. --- src/rabbit_mnesia.erl | 115 ++++++++++++++++++++++++++++++++++--------------- src/rabbit_upgrade.erl | 18 ++++---- 2 files changed, 91 insertions(+), 42 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 82e2a30e..49d04116 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -43,6 +43,8 @@ %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). +-define(EXAMPLE_RABBIT_TABLE, rabbit_durable_exchange). + -include("rabbit.hrl"). %%---------------------------------------------------------------------------- @@ -164,7 +166,7 @@ nodes_of_type(Type) -> %% Specifically, we check whether a certain table, which we know %% will be written to disk on a disc node, is stored on disk or in %% RAM. - mnesia:table_info(rabbit_durable_exchange, Type). + mnesia:table_info(?EXAMPLE_RABBIT_TABLE, Type). table_definitions() -> [{rabbit_user, @@ -387,40 +389,50 @@ init_db(ClusterNodes, Force) -> true -> ok end, case {Nodes, mnesia:system_info(use_dir)} of - {[], true} -> - %% True single disc node, or "master" (i.e. 
without - %% config) disc node in cluster, attempt upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], fun forget_other_nodes/0) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[AnotherNode|_], _} -> - %% Subsequent node in cluster, catch up - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], reset_fun(ProperClusterNodes)) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - ensure_schema_ok() + {_, _} -> + DiscNodes = mnesia:table_info(schema, disc_copies), + case are_we_upgrader(DiscNodes) of + true -> + %% True single disc node, or last disc + %% node in cluster to shut down, attempt + %% upgrade + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], + fun () -> ok end, + fun forget_other_nodes/0) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + false -> + %% Subsequent node in cluster, catch up + %% TODO how to do this? + %% ensure_version_ok( + %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + case rabbit_upgrade:maybe_upgrade( + [local], + ensure_nodes_running_fun(DiscNodes), + reset_fun(DiscNodes -- [node()])) of + ok -> + ok; + %% If we're just starting up a new node + %% we won't have a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + ensure_schema_ok() + end end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -459,17 +471,52 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -reset_fun(ProperClusterNodes) -> +ensure_nodes_running_fun(Nodes) -> + fun() -> + case nodes_running(Nodes) of + [] -> + exit("Cluster upgrade needed. The first node you start " + "should be the last node to be shut down."); + _ -> + ok + end + end. + +reset_fun(Nodes) -> fun() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, ProperClusterNodes), + {ok, _} = mnesia:change_config(extra_db_nodes, Nodes), ok end. +%% Were we the last node in the cluster to shut down or is there no cluster? +%% The answer to this is yes if: +%% * We are our canonical source for reading a table +%% - If the canonical source is "nowhere" or another node, we are out of date +%% * No other nodes are running Mnesia and have finished booting Rabbit. +%% - Since any node will be its own canonical source once the cluster is up. + +are_we_upgrader(Nodes) -> + Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), + Node = node(), + case {Where, nodes_running(Nodes)} of + {Node, []} -> true; + {_, _} -> false + end. 
+ +nodes_running(Nodes) -> + [N || N <- Nodes, node_running(N)]. + +node_running(Node) -> + case rpc:call(Node, application, which_applications, []) of + {badrpc, _} -> false; + Apps -> lists:keysearch(rabbit, 1, Apps) =/= false + end. + create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 48c00d69..c852a0f9 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/2, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/3, read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,7 +36,7 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/2 :: ([scope()], fun (() -> 'ok')) +-spec(maybe_upgrade/3 :: ([scope()], fun (() -> 'ok'), fun (() -> 'ok')) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). @@ -49,21 +49,22 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes, Fun) -> +maybe_upgrade(Scopes, GuardFun, UpgradeFun) -> case read_version() of {ok, CurrentHeads} -> with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) end); + fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, + GuardFun, UpgradeFun, G) end); {error, enoent} -> version_not_available end. -maybe_upgrade_graph(CurrentHeads, Scopes, Fun, G) -> +maybe_upgrade_graph(CurrentHeads, Scopes, GuardFun, UpgradeFun, G) -> case unknown_heads(CurrentHeads, G) of [] -> case upgrades_to_apply(CurrentHeads, Scopes, G) of [] -> ok; - Upgrades -> apply_upgrades(Upgrades, Fun) + Upgrades -> apply_upgrades(Upgrades, GuardFun, UpgradeFun) end; Unknown -> throw({error, {future_upgrades_found, Unknown}}) @@ -132,7 +133,8 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, Fun) -> +apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> + GuardFun(), LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -147,7 +149,7 @@ apply_upgrades(Upgrades, Fun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - ok = Fun(), + ok = UpgradeFun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From d39b09caeb77f61ead9d1621bf808b6d5272d9bb Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 6 Jan 2011 19:04:33 +0000 Subject: Sender-specified distribution First attempt for direct exchanges only --- src/rabbit_exchange_type_direct.erl | 16 +++++++++++++--- src/rabbit_misc.erl | 11 ++++++++++- src/rabbit_router.erl | 21 +++++++++++++++++++-- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index d49d0199..ab688853 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -31,6 +31,7 @@ -module(rabbit_exchange_type_direct). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -50,9 +51,18 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. 
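Note on the "Sender-specified distribution" patch (its route/2 and router changes follow below): in this first attempt the CC and BCC header values are single longstrs holding NUL-separated names, which the direct exchange resolves directly to queue resources in the message's vhost, and rabbit_router strips the BCC header before delivery so consumers never see it. A hedged client-side sketch, assuming the Erlang AMQP client (amqp_client); the queue names and routing key are illustrative:

    -include_lib("amqp_client/include/amqp_client.hrl").

    %% Publish to a direct exchange: besides the normal binding match
    %% on <<"main">>, copies go to the queues named in CC and BCC.
    %% BCC is removed from the message the consumers see.
    publish_with_copies(Channel, Exchange, Payload) ->
        Props = #'P_basic'{headers = [{<<"CC">>,  longstr, <<"audit", 0, "archive">>},
                                      {<<"BCC">>, longstr, <<"shadow">>}]},
        amqp_channel:cast(Channel,
                          #'basic.publish'{exchange    = Exchange,
                                           routing_key = <<"main">>},
                          #amqp_msg{props = Props, payload = Payload}).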
-route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey}}) -> - rabbit_router:match_routing_key(Name, RoutingKey). +route(#exchange{name = #resource{virtual_host = VHost} = Name}, + #delivery{message = #basic_message{routing_key = RoutingKey, + content = Content}}) -> + BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), + HeaderRKeys = + case (Content#content.properties)#'P_basic'.headers of + undefined -> []; + Headers -> rabbit_misc:table_lookup(Headers, <<"CC">>, <<0>>) ++ + rabbit_misc:table_lookup(Headers, <<"BCC">>, <<0>>) + end, + HeaderRoutes = [rabbit_misc:r(VHost, queue, RKey) || RKey <- HeaderRKeys], + lists:usort(BindingRoutes ++ HeaderRoutes). validate(_X) -> ok. create(_X) -> ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 15ba787a..604346ed 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,7 +40,7 @@ protocol_error/3, protocol_error/4, protocol_error/1]). -export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). --export([table_lookup/2]). +-export([table_lookup/3, table_lookup/2]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). @@ -112,6 +112,8 @@ 'ok' | rabbit_types:connection_exit()). -spec(dirty_read/1 :: ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). +-spec(table_lookup/3 :: + (rabbit_framing:amqp_table(), binary(), binary()) -> [binary()]). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). @@ -253,6 +255,13 @@ dirty_read(ReadSpec) -> [] -> {error, not_found} end. +table_lookup(Table, Key, Separator) -> + case table_lookup(Table, Key) of + undefined -> []; + {longstr, BinVal} -> binary:split(BinVal, Separator, [global]); + _ -> [] + end. + table_lookup(Table, Key) -> case lists:keysearch(Key, 1, Table) of {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin}; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index d49c072c..2f556df7 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -32,6 +32,7 @@ -module(rabbit_router). -include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -export([deliver/2, match_bindings/2, match_routing_key/2]). @@ -68,22 +69,38 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. QPids = lookup_qpids(QNames), + ModifiedDelivery = strip_header(Delivery, <<"BCC">>), delegate:invoke_no_result( - QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, Delivery) end), + QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), + ModifiedDelivery = strip_header(Delivery, <<"BCC">>), {Success, _} = delegate:invoke(QPids, fun (Pid) -> - rabbit_amqqueue:deliver(Pid, Delivery) + rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {Routed, Handled} = lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). 
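A worked example of the new rabbit_misc:table_lookup/3 used by route/2 above (header values are illustrative): it returns the parts of a longstr header split on the given separator, and the empty list when the key is absent or not a longstr.

    table_lookup_example() ->
        Headers = [{<<"CC">>, longstr, <<"a", 0, "b">>}],
        %% splitting on the NUL separator yields the individual names...
        [<<"a">>, <<"b">>] = rabbit_misc:table_lookup(Headers, <<"CC">>, <<0>>),
        %% ...and an absent key contributes no extra routes at all
        []                 = rabbit_misc:table_lookup(Headers, <<"BCC">>, <<0>>),
        ok.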
+strip_header(Delivery = #delivery{message = Message = #basic_message{ + content = Content = #content{ + properties = Props = #'P_basic'{headers = Headers}}}}, + Key) when Headers =/= undefined -> + case lists:keyfind(Key, 1, Headers) of + false -> Delivery; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + Delivery#delivery{message = Message#basic_message{ + content = Content#content{ + properties_bin = none, + properties = Props#'P_basic'{headers = Headers0}}}} + end; +strip_header(Delivery, _Key) -> + Delivery. %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same source -- cgit v1.2.1 From 9ab02c62b7edda1a097912b1f0194788df15f2ff Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 11:54:35 +0000 Subject: Ironically our dummy upgrades now need to be upgraded. --- src/rabbit_upgrade_functions.erl | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 8fee70af..1806c40f 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -91,11 +91,21 @@ internal_exchanges() -> || T <- Tables ], ok. +user_to_internal_user() -> + mnesia( + rabbit_user, + fun({user, Username, PasswordHash, IsAdmin}) -> + {internal_user, Username, PasswordHash, IsAdmin} + end, + [username, password_hash, is_admin], internal_user). + + + one() -> mnesia( rabbit_user, - fun ({user, Username, Hash, IsAdmin}) -> - {user, Username, Hash, IsAdmin, foo} + fun ({internal_user, Username, Hash, IsAdmin}) -> + {internal_user, Username, Hash, IsAdmin, foo} end, [username, password_hash, is_admin, extra]). @@ -106,20 +116,11 @@ two() -> three() -> mnesia( rabbit_user, - fun ({user, Username, Hash, IsAdmin, _}) -> - {user, Username, Hash, IsAdmin} + fun ({internal_user, Username, Hash, IsAdmin, _}) -> + {internal_user, Username, Hash, IsAdmin} end, [username, password_hash, is_admin]). - -user_to_internal_user() -> - mnesia( - rabbit_user, - fun({user, Username, PasswordHash, IsAdmin}) -> - {internal_user, Username, PasswordHash, IsAdmin} - end, - [username, password_hash, is_admin], internal_user). - %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From d235fbe0db6c709860e8fa19d7917484ca902d2e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 12:07:12 +0000 Subject: Refactor a bit, reinstate ensure_version_ok check. --- src/rabbit_mnesia.erl | 86 +++++++++++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c536c64f..9ea1be28 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -393,46 +393,7 @@ init_db(ClusterNodes, Force) -> %% Nothing there at all, start from scratch ok = create_schema(); {_, _} -> - DiscNodes = mnesia:table_info(schema, disc_copies), - case are_we_upgrader(DiscNodes) of - true -> - %% True single disc node, or last disc - %% node in cluster to shut down, attempt - %% upgrade - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], - fun () -> ok end, - fun forget_other_nodes/0) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - false -> - %% Subsequent node in cluster, catch up - %% TODO how to do this? 
- %% ensure_version_ok( - %% rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], - ensure_nodes_running_fun(DiscNodes), - reset_fun(DiscNodes -- [node()])) of - ok -> - ok; - %% If we're just starting up a new node - %% we won't have a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - ensure_schema_ok() - end + ok = setup_existing_node(ClusterNodes, Nodes) end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -441,6 +402,49 @@ init_db(ClusterNodes, Force) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. +setup_existing_node(ClusterNodes, Nodes) -> + DiscNodes = mnesia:table_info(schema, disc_copies), + case are_we_upgrader(DiscNodes) of + true -> + %% True single disc node, or last disc node in cluster to + %% shut down, attempt upgrade + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade( + [mnesia, local], fun () -> ok end, + fun forget_other_nodes/0) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + false -> + %% Subsequent node in cluster, catch up + case Nodes of + [AnotherNode|_] -> + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])); + [] -> + ok + end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + case rabbit_upgrade:maybe_upgrade( + [local], ensure_nodes_running_fun(DiscNodes), + reset_fun(DiscNodes -- [node()])) of + ok -> + ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + ensure_schema_ok() + end. + schema_ok_or_move() -> case check_schema_integrity() of ok -> @@ -476,7 +480,7 @@ ensure_nodes_running_fun(Nodes) -> case nodes_running(Nodes) of [] -> exit("Cluster upgrade needed. 
The first node you start " - "should be the last node to be shut down."); + "should be the last disc node to be shut down."); _ -> ok end -- cgit v1.2.1 From d1e659c8536e4bdd855d881eb2b1b6ea7def180a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 7 Jan 2011 13:23:21 +0000 Subject: Cosmetic --- src/rabbit_mnesia.erl | 27 ++++++++++++++++----------- src/rabbit_upgrade.erl | 2 +- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9ea1be28..ca84b29e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -407,11 +407,10 @@ setup_existing_node(ClusterNodes, Nodes) -> case are_we_upgrader(DiscNodes) of true -> %% True single disc node, or last disc node in cluster to - %% shut down, attempt upgrade + %% shut down, attempt upgrade if necessary ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade( - [mnesia, local], fun () -> ok end, - fun forget_other_nodes/0) of + case rabbit_upgrade:maybe_upgrade([mnesia, local], fun () -> ok end, + fun forget_other_nodes/0) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -427,7 +426,8 @@ setup_existing_node(ClusterNodes, Nodes) -> IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), case rabbit_upgrade:maybe_upgrade( - [local], ensure_nodes_running_fun(DiscNodes), + [local], + ensure_nodes_running_fun(DiscNodes), reset_fun(DiscNodes -- [node()])) of ok -> ok; @@ -475,9 +475,9 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -ensure_nodes_running_fun(Nodes) -> +ensure_nodes_running_fun(DiscNodes) -> fun() -> - case nodes_running(Nodes) of + case nodes_running(DiscNodes) of [] -> exit("Cluster upgrade needed. The first node you start " "should be the last disc node to be shut down."); @@ -486,23 +486,28 @@ ensure_nodes_running_fun(Nodes) -> end end. -reset_fun(Nodes) -> +reset_fun(OtherNodes) -> fun() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - {ok, _} = mnesia:change_config(extra_db_nodes, Nodes), + {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), ok end. %% Were we the last node in the cluster to shut down or is there no cluster? %% The answer to this is yes if: %% * We are our canonical source for reading a table -%% - If the canonical source is "nowhere" or another node, we are out of date +%% - If the canonical source is "nowhere" or another node, we are out +%% of date +%% and %% * No other nodes are running Mnesia and have finished booting Rabbit. -%% - Since any node will be its own canonical source once the cluster is up. 
+%% - Since any node will be its own canonical source once the cluster +%% is up, but just having Mnesia running is not enough - that node +%% could be halfway through starting (and deciding it is the upgrader +%% too) are_we_upgrader(Nodes) -> Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c852a0f9..3a78dd7f 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -134,7 +134,7 @@ heads(G) -> %% ------------------------------------------------------------------- apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> - GuardFun(), + ok = GuardFun(), LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> -- cgit v1.2.1 From af1a5fa2320b99d421f84c09e1fa8e2594ba3950 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 10 Jan 2011 17:25:13 +0000 Subject: Move the upgrade tests earlier in the boot process. This doesn't work either, just committing it in order not to lose it. --- src/rabbit_mnesia.erl | 107 +++++++++++++++++++++++++------------------------ src/rabbit_upgrade.erl | 53 +++++++++++++----------- 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index ca84b29e..a11347ff 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -94,6 +94,7 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> + ok = maybe_reset_for_upgrades(), ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), @@ -141,11 +142,6 @@ all_clustered_nodes() -> running_clustered_nodes() -> mnesia:system_info(running_db_nodes). -forget_other_nodes() -> - Nodes = all_clustered_nodes() -- [node()], - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Nodes], - ok. 
- empty_ram_only_tables() -> Node = node(), lists:foreach( @@ -404,17 +400,17 @@ init_db(ClusterNodes, Force) -> setup_existing_node(ClusterNodes, Nodes) -> DiscNodes = mnesia:table_info(schema, disc_copies), - case are_we_upgrader(DiscNodes) of - true -> - %% True single disc node, or last disc node in cluster to - %% shut down, attempt upgrade if necessary + Node = node(), + case upgrader(DiscNodes) of + Node -> + %% True single disc node, or upgrader node - attempt + %% upgrade if necessary ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local], fun () -> ok end, - fun forget_other_nodes/0) of + case rabbit_upgrade:maybe_upgrade([mnesia, local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - false -> + _ -> %% Subsequent node in cluster, catch up case Nodes of [AnotherNode|_] -> @@ -423,12 +419,8 @@ setup_existing_node(ClusterNodes, Nodes) -> [] -> ok end, - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - case rabbit_upgrade:maybe_upgrade( - [local], - ensure_nodes_running_fun(DiscNodes), - reset_fun(DiscNodes -- [node()])) of + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade([local]) of ok -> ok; %% If we're just starting up a new node we won't have @@ -436,13 +428,21 @@ setup_existing_node(ClusterNodes, Nodes) -> version_not_available -> ok = rabbit_upgrade:write_version() end, + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), ok = create_local_table_copy(schema, disc_copies), ok = create_local_table_copies(case IsDiskNode of true -> disc; false -> ram end), - ensure_schema_ok() + ensure_schema_ok(), + %% If we're just starting up a new node we won't have + %% a version + case rabbit_upgrade:read_version() of + {error, _} -> rabbit_upgrade:write_version(); + _ -> ok + end end. schema_ok_or_move() -> @@ -475,50 +475,48 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -ensure_nodes_running_fun(DiscNodes) -> - fun() -> - case nodes_running(DiscNodes) of - [] -> - exit("Cluster upgrade needed. The first node you start " - "should be the last disc node to be shut down."); +maybe_reset_for_upgrades() -> + case rabbit_upgrade:upgrade_required([mnesia]) of + true -> + DiscNodes = all_clustered_nodes(), + Upgrader = upgrader(DiscNodes), + case node() of + Upgrader -> + reset_for_primary_upgrade(DiscNodes); _ -> - ok - end + reset_for_non_primary_upgrade(Upgrader, DiscNodes) + end; + false -> + ok end. -reset_fun(OtherNodes) -> - fun() -> +reset_for_primary_upgrade(DiscNodes) -> + Others = DiscNodes -- [node()], + ensure_mnesia_running(), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], + ok. + +reset_for_non_primary_upgrade(Upgrader, DiscNodes) -> + case node_running(Upgrader) of + false -> + exit(lists:flatten( + io_lib:format( + "Cluster upgrade needed. Please start node ~s first", + [Upgrader]))); + true -> + OtherNodes = DiscNodes -- [node()], mnesia:stop(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), + mnesia:start(), {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), ok end. -%% Were we the last node in the cluster to shut down or is there no cluster? 
-%% The answer to this is yes if: -%% * We are our canonical source for reading a table -%% - If the canonical source is "nowhere" or another node, we are out -%% of date -%% and -%% * No other nodes are running Mnesia and have finished booting Rabbit. -%% - Since any node will be its own canonical source once the cluster -%% is up, but just having Mnesia running is not enough - that node -%% could be halfway through starting (and deciding it is the upgrader -%% too) - -are_we_upgrader(Nodes) -> - Where = mnesia:table_info(?EXAMPLE_RABBIT_TABLE, where_to_read), - Node = node(), - case {Where, nodes_running(Nodes)} of - {Node, []} -> true; - {_, _} -> false - end. - -nodes_running(Nodes) -> - [N || N <- Nodes, node_running(N)]. +upgrader(Nodes) -> + [Upgrader|_] = lists:usort(Nodes), + Upgrader. node_running(Node) -> case rpc:call(Node, application, which_applications, []) of @@ -639,6 +637,9 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. +force_tables() -> + [mnesia:force_load_table(T) || T <- table_names()]. + reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 3a78dd7f..260f85a1 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,8 @@ -module(rabbit_upgrade). --export([maybe_upgrade/3, read_version/0, write_version/0, desired_version/0]). +-export([maybe_upgrade/1, upgrade_required/1]). +-export([read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -36,8 +37,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). --spec(maybe_upgrade/3 :: ([scope()], fun (() -> 'ok'), fun (() -> 'ok')) - -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(upgrade_required/1 :: ([scope()]) -> boolean()). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -49,25 +50,18 @@ %% Try to upgrade the schema. If no information on the existing schema %% could be found, do nothing. rabbit_mnesia:check_schema_integrity() %% will catch the problem. -maybe_upgrade(Scopes, GuardFun, UpgradeFun) -> - case read_version() of - {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> maybe_upgrade_graph(CurrentHeads, Scopes, - GuardFun, UpgradeFun, G) end); - {error, enoent} -> - version_not_available +maybe_upgrade(Scopes) -> + case upgrades_required(Scopes) of + version_not_available -> version_not_available; + [] -> ok; + Upgrades -> apply_upgrades(Upgrades) end. -maybe_upgrade_graph(CurrentHeads, Scopes, GuardFun, UpgradeFun, G) -> - case unknown_heads(CurrentHeads, G) of - [] -> - case upgrades_to_apply(CurrentHeads, Scopes, G) of - [] -> ok; - Upgrades -> apply_upgrades(Upgrades, GuardFun, UpgradeFun) - end; - Unknown -> - throw({error, {future_upgrades_found, Unknown}}) +upgrade_required(Scopes) -> + case upgrades_required(Scopes) of + version_not_available -> false; + [] -> false; + _ -> true end. 
read_version() -> @@ -85,6 +79,21 @@ desired_version() -> %% ------------------------------------------------------------------- +upgrades_required(Scopes) -> + case read_version() of + {ok, CurrentHeads} -> + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> upgrades_to_apply(CurrentHeads, Scopes, G); + Unknown -> throw({error, + {future_upgrades_found, Unknown}}) + end + end); + {error, enoent} -> + version_not_available + end. + with_upgrade_graph(Fun) -> case rabbit_misc:build_acyclic_graph( fun vertices/2, fun edges/2, @@ -133,8 +142,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> - ok = GuardFun(), +apply_upgrades(Upgrades) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -149,7 +157,6 @@ apply_upgrades(Upgrades, GuardFun, UpgradeFun) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), - ok = UpgradeFun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), -- cgit v1.2.1 From 0fef8fdcc755596782543d432a7103d5c7dd90fc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 12:44:43 +0000 Subject: Holy %$*! it works. Still needs tidying up somewhat... --- src/rabbit_mnesia.erl | 99 ++++++++---------------------------------------- src/rabbit_prelaunch.erl | 4 +- src/rabbit_upgrade.erl | 77 +++++++++++++++++++++++++++++++------ 3 files changed, 82 insertions(+), 98 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a11347ff..345ca82a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1]). + empty_ram_only_tables/0, copy_db/1, create_cluster_nodes_config/1]). -export([table_names/0]). @@ -94,7 +94,6 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> - ok = maybe_reset_for_upgrades(), ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), @@ -399,35 +398,19 @@ init_db(ClusterNodes, Force) -> end. 
setup_existing_node(ClusterNodes, Nodes) -> - DiscNodes = mnesia:table_info(schema, disc_copies), - Node = node(), - case upgrader(DiscNodes) of - Node -> - %% True single disc node, or upgrader node - attempt - %% upgrade if necessary + case Nodes of + [] -> + %% We're the first node up ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([mnesia, local]) of + case rabbit_upgrade:maybe_upgrade([local]) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; - _ -> + [AnotherNode|_] -> %% Subsequent node in cluster, catch up - case Nodes of - [AnotherNode|_] -> - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])); - [] -> - ok - end, + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([local]) of - ok -> - ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> - ok = rabbit_upgrade:write_version() - end, IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -436,13 +419,13 @@ setup_existing_node(ClusterNodes, Nodes) -> true -> disc; false -> ram end), - ensure_schema_ok(), - %% If we're just starting up a new node we won't have - %% a version - case rabbit_upgrade:read_version() of - {error, _} -> rabbit_upgrade:write_version(); - _ -> ok - end + case rabbit_upgrade:maybe_upgrade([local]) of + ok -> ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> ok = rabbit_upgrade:write_version() + end, + ensure_schema_ok() end. schema_ok_or_move() -> @@ -475,55 +458,6 @@ ensure_schema_ok() -> {error, Reason} -> throw({error, {schema_invalid, Reason}}) end. -maybe_reset_for_upgrades() -> - case rabbit_upgrade:upgrade_required([mnesia]) of - true -> - DiscNodes = all_clustered_nodes(), - Upgrader = upgrader(DiscNodes), - case node() of - Upgrader -> - reset_for_primary_upgrade(DiscNodes); - _ -> - reset_for_non_primary_upgrade(Upgrader, DiscNodes) - end; - false -> - ok - end. - -reset_for_primary_upgrade(DiscNodes) -> - Others = DiscNodes -- [node()], - ensure_mnesia_running(), - force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], - ok. - -reset_for_non_primary_upgrade(Upgrader, DiscNodes) -> - case node_running(Upgrader) of - false -> - exit(lists:flatten( - io_lib:format( - "Cluster upgrade needed. Please start node ~s first", - [Upgrader]))); - true -> - OtherNodes = DiscNodes -- [node()], - mnesia:stop(), - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema), - mnesia:start(), - {ok, _} = mnesia:change_config(extra_db_nodes, OtherNodes), - ok - end. - -upgrader(Nodes) -> - [Upgrader|_] = lists:usort(Nodes), - Upgrader. - -node_running(Node) -> - case rpc:call(Node, application, which_applications, []) of - {badrpc, _} -> false; - Apps -> lists:keysearch(rabbit, 1, Apps) =/= false - end. - create_schema() -> mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), @@ -637,9 +571,6 @@ wait_for_tables(TableNames) -> throw({error, {failed_waiting_for_tables, Reason}}) end. -force_tables() -> - [mnesia:force_load_table(T) || T <- table_names()]. 
- reset(Force) -> ok = ensure_mnesia_not_running(), Node = node(), diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8ae45abd..c5ee63ba 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -250,8 +250,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. -process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> - [{apply,{rabbit,prepare,[]}}, Entry]; +process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> + [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; process_entry(Entry) -> [Entry]. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 260f85a1..9f9e8806 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -21,7 +21,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade/1, upgrade_required/1]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). -export([read_version/0, write_version/0, desired_version/0]). -include("rabbit.hrl"). @@ -37,8 +37,8 @@ -type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). +-spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). --spec(upgrade_required/1 :: ([scope()]) -> boolean()). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -47,9 +47,69 @@ %% ------------------------------------------------------------------- -%% Try to upgrade the schema. If no information on the existing schema -%% could be found, do nothing. rabbit_mnesia:check_schema_integrity() -%% will catch the problem. +maybe_upgrade_mnesia() -> + rabbit:prepare(), + case upgrades_required([mnesia]) of + Upgrades = [_|_] -> + DiscNodes = rabbit_mnesia:all_clustered_nodes(), + Upgrader = upgrader(DiscNodes), + case node() of + Upgrader -> + primary_upgrade(Upgrades, DiscNodes); + _ -> + non_primary_upgrade(Upgrader, DiscNodes) + end; + [] -> + ok; + version_not_available -> + ok + end. + +upgrader(Nodes) -> + [Upgrader|_] = lists:usort(Nodes), + Upgrader. + +primary_upgrade(Upgrades, DiscNodes) -> + Others = DiscNodes -- [node()], + %% TODO this should happen after backing up! + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], + apply_upgrades(Upgrades), + ok. + +force_tables() -> + [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. + +non_primary_upgrade(Upgrader, DiscNodes) -> + case node_running(Upgrader) of + false -> + Msg = "~n~n * Cluster upgrade needed. Please start node ~s " + "first. * ~n~n~n", + Args = [Upgrader], + %% We don't throw or exit here since that gets thrown + %% straight out into do_boot, generating an erl_crash.dump + %% and displaying any error message in a confusing way. + error_logger:error_msg(Msg, Args), + io:format(Msg, Args), + error_logger:logfile(close), + halt(1); + true -> + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), + ok + end. + +node_running(Node) -> + case rpc:call(Node, application, which_applications, []) of + {badrpc, _} -> false; + Apps -> lists:keysearch(rabbit, 1, Apps) =/= false + end. 
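
An illustrative aside, not part of the patch above: upgrader/1 elects the node that will perform the mnesia upgrade by sorting the clustered node names and taking the first. Because lists:usort/1 is deterministic, every node computes the same answer from the same node list without any coordination. A minimal sketch with made-up node names:

upgrader_example() ->
    %% Every node sorts the same list, so every node agrees on 'rabbit@a'.
    Nodes = ['rabbit@c', 'rabbit@a', 'rabbit@b'],
    [Upgrader | _] = lists:usort(Nodes),
    Upgrader.
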
+ +%% ------------------------------------------------------------------- + maybe_upgrade(Scopes) -> case upgrades_required(Scopes) of version_not_available -> version_not_available; @@ -57,13 +117,6 @@ maybe_upgrade(Scopes) -> Upgrades -> apply_upgrades(Upgrades) end. -upgrade_required(Scopes) -> - case upgrades_required(Scopes) of - version_not_available -> false; - [] -> false; - _ -> true - end. - read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [Heads]} -> {ok, Heads}; -- cgit v1.2.1 From a153921362e59e87f5052e5ce80f765425777b59 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 13:34:02 +0000 Subject: One DAG per scope. --- src/rabbit_mnesia.erl | 4 +- src/rabbit_upgrade.erl | 82 +++++++++++++++++++++------------------- src/rabbit_upgrade_functions.erl | 4 +- 3 files changed, 48 insertions(+), 42 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 345ca82a..997b12d4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -402,7 +402,7 @@ setup_existing_node(ClusterNodes, Nodes) -> [] -> %% We're the first node up ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade([local]) of + case rabbit_upgrade:maybe_upgrade(local) of ok -> ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; @@ -419,7 +419,7 @@ setup_existing_node(ClusterNodes, Nodes) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade([local]) of + case rabbit_upgrade:maybe_upgrade(local) of ok -> ok; %% If we're just starting up a new node we won't have %% a version diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9f9e8806..4bdff65a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -28,17 +28,18 @@ -define(VERSION_FILENAME, "schema_version"). -define(LOCK_FILENAME, "schema_upgrade_lock"). +-define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- -ifdef(use_specs). -type(step() :: atom()). --type(scope() :: 'mnesia' | 'local'). -type(version() :: [step()]). +-type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). --spec(maybe_upgrade/1 :: ([scope()]) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade/1 :: (scope()) -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). @@ -49,8 +50,8 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), - case upgrades_required([mnesia]) of - Upgrades = [_|_] -> + case upgrades_required(mnesia) of + [_|_] = Upgrades -> DiscNodes = rabbit_mnesia:all_clustered_nodes(), Upgrader = upgrader(DiscNodes), case node() of @@ -72,8 +73,7 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], %% TODO this should happen after backing up! - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], apply_upgrades(Upgrades), @@ -110,8 +110,8 @@ node_running(Node) -> %% ------------------------------------------------------------------- -maybe_upgrade(Scopes) -> - case upgrades_required(Scopes) of +maybe_upgrade(Scope) -> + case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; Upgrades -> apply_upgrades(Upgrades) @@ -128,34 +128,41 @@ write_version() -> ok. 
desired_version() -> - with_upgrade_graph(fun (G) -> heads(G) end). + lists:append( + [with_upgrade_graph(fun (_, G) -> heads(G) end, Scope, []) + || Scope <- ?SCOPES]). %% ------------------------------------------------------------------- -upgrades_required(Scopes) -> +upgrades_required(Scope) -> case read_version() of {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> upgrades_to_apply(CurrentHeads, Scopes, G); - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end); + with_upgrade_graph(fun upgrades_to_apply/2, Scope, CurrentHeads); {error, enoent} -> version_not_available end. -with_upgrade_graph(Fun) -> +with_upgrade_graph(Fun, Scope, CurrentHeads) -> + G0 = make_graph(Scope), + Gs = [G0|[make_graph(S) || S <- ?SCOPES -- [Scope]]], + try + Known = lists:append([digraph:vertices(G) || G <- Gs]), + case unknown_heads(CurrentHeads, Known) of + [] -> ok; + Unknown -> throw({error, {future_upgrades_found, Unknown}}) + end, + Fun(CurrentHeads, G0) + after + [true = digraph:delete(G) || G <- Gs] + end. + +make_graph(Scope) -> case rabbit_misc:build_acyclic_graph( - fun vertices/2, fun edges/2, + fun (Module, Steps) -> vertices(Module, Steps, Scope) end, + fun (Module, Steps) -> edges(Module, Steps, Scope) end, rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> try - Fun(G) - after - true = digraph:delete(G) - end; + {ok, G} -> + G; {error, {vertex, duplicate, StepName}} -> throw({error, {duplicate_upgrade_step, StepName}}); {error, {edge, {bad_vertex, StepName}, _From, _To}} -> @@ -164,18 +171,19 @@ with_upgrade_graph(Fun) -> throw({error, {cycle_in_upgrade_steps, StepNames}}) end. -vertices(Module, Steps) -> - [{StepName, {Scope, {Module, StepName}}} || - {StepName, Scope, _Reqs} <- Steps]. +vertices(Module, Steps, Scope0) -> + [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, + Scope0 == Scope1]. -edges(_Module, Steps) -> - [{Require, StepName} || {StepName, _Scope, Requires} <- Steps, - Require <- Requires]. +edges(_Module, Steps, Scope0) -> + [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, + Require <- Requires, + Scope0 == Scope1]. -unknown_heads(Heads, G) -> - [H || H <- Heads, digraph:vertex(G, H) =:= false]. +unknown_heads(Heads, Known) -> + lists:filter(fun(H) -> not lists:member(H, Known) end, Heads). -upgrades_to_apply(Heads, Scopes, G) -> +upgrades_to_apply(Heads, G) -> %% Take all the vertices which can reach the known heads. That's %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. @@ -185,10 +193,8 @@ upgrades_to_apply(Heads, Scopes, G) -> sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. - Sorted = [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))], - %% Only return the upgrades for the appropriate scopes - [Upgrade || {Scope, Upgrade} <- Sorted, lists:member(Scope, Scopes)]. + [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. heads(G) -> lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). 
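
An illustrative aside, not part of the patch: the scope-filtered graph above drives upgrade ordering. Each upgrade step is a vertex, each requirement an edge, and upgrades_to_apply/2 keeps every vertex that cannot reach an already-applied head, then topologically sorts that remainder. A minimal standalone sketch of the same calculation, using only the stdlib digraph/digraph_utils modules and invented step names a, b and c:

example_upgrades_to_apply() ->
    G = digraph:new([acyclic]),
    [digraph:add_vertex(G, V) || V <- [a, b, c]],
    digraph:add_edge(G, a, b),
    digraph:add_edge(G, b, c),
    Heads = [a],   %% what a version file listing only 'a' would record
    Applied = sets:from_list(digraph_utils:reaching(Heads, G)),
    Todo = sets:to_list(sets:subtract(sets:from_list(digraph:vertices(G)),
                                      Applied)),
    Order = digraph_utils:topsort(digraph_utils:subgraph(G, Todo)),
    true = digraph:delete(G),
    Order.   %% [b, c], in dependency order
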
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 4068b090..b9b46f9a 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -31,8 +31,8 @@ -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). -rabbit_upgrade({one, mnesia, [user_to_internal_user]}). --rabbit_upgrade({two, local, [one]}). --rabbit_upgrade({three, mnesia, [two]}). +-rabbit_upgrade({two, mnesia, [one]}). +-rabbit_upgrade({three, local, []}). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 70c6ce665144f6d85a160e842c4cdfe543865ef4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 13:44:41 +0000 Subject: Break the cluster *after* taking the backup. --- src/rabbit_mnesia.erl | 9 +-------- src/rabbit_upgrade.erl | 19 ++++++++++++------- src/rabbit_upgrade_functions.erl | 8 ++++---- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 997b12d4..26fda4e9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -492,14 +492,7 @@ move_db() -> ok. copy_db(Destination) -> - mnesia:stop(), - case rabbit_misc:recursive_copy(dir(), Destination) of - ok -> - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = wait_for_tables(); - {error, E} -> - {error, E} - end. + rabbit_misc:recursive_copy(dir(), Destination). create_tables() -> lists:foreach(fun ({Tab, TabDef}) -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 4bdff65a..d0fdbf08 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -72,11 +72,15 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], - %% TODO this should happen after backing up! - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) || Node <- Others], - apply_upgrades(Upgrades), + apply_upgrades( + Upgrades, + fun () -> + info("Upgrades: Breaking cluster~n", []), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + force_tables(), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end), ok. force_tables() -> @@ -114,7 +118,7 @@ maybe_upgrade(Scope) -> case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Upgrades) + Upgrades -> apply_upgrades(Upgrades, fun() -> ok end) end. read_version() -> @@ -201,7 +205,7 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades) -> +apply_upgrades(Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> @@ -216,6 +220,7 @@ apply_upgrades(Upgrades) -> %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + Fun(), [apply_upgrade(Upgrade) || Upgrade <- Upgrades], info("Upgrades: All upgrades applied successfully~n", []), ok = write_version(), diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index b9b46f9a..151b498d 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -110,10 +110,6 @@ one() -> [username, password_hash, is_admin, extra]). two() -> - ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), - [test]). 
- -three() -> mnesia( rabbit_user, fun ({internal_user, Username, Hash, IsAdmin, _}) -> @@ -121,6 +117,10 @@ three() -> end, [username, password_hash, is_admin]). +three() -> + ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), + [test]). + %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 31cef377dff7bdfce6bff9b802ad0dd22d3341a1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:03:29 +0000 Subject: Store the version as an orddict keyed on different scopes, and thus don't assert that everything is done after the first upgrade. --- src/rabbit_upgrade.erl | 98 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 35 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d0fdbf08..9d6263fe 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -35,7 +35,7 @@ -ifdef(use_specs). -type(step() :: atom()). --type(version() :: [step()]). +-type(version() :: [{scope(), [step()]}]). -type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). @@ -73,9 +73,10 @@ upgrader(Nodes) -> primary_upgrade(Upgrades, DiscNodes) -> Others = DiscNodes -- [node()], apply_upgrades( + mnesia, Upgrades, fun () -> - info("Upgrades: Breaking cluster~n", []), + info("mnesia upgrades: Breaking cluster~n", []), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), [{atomic, ok} = mnesia:del_table_copy(schema, Node) @@ -118,55 +119,80 @@ maybe_upgrade(Scope) -> case upgrades_required(Scope) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Upgrades, fun() -> ok end) + Upgrades -> apply_upgrades(Scope, Upgrades, + fun() -> ok end) end. read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [Heads]} -> {ok, Heads}; + {ok, [V]} -> case orddict:find(mnesia, V) of + error -> {ok, convert_old_version(V)}; + _ -> {ok, V} + end; {error, _} = Err -> Err end. +read_version(Scope) -> + case read_version() of + {error, _} = E -> E; + {ok, V} -> {ok, orddict:fetch(Scope, V)} + end. + write_version() -> ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), ok. +write_version(Scope) -> + {ok, V0} = read_version(), + V = orddict:store(Scope, desired_version(Scope), V0), + ok = rabbit_misc:write_term_file(schema_filename(), [V]), + ok. + desired_version() -> - lists:append( - [with_upgrade_graph(fun (_, G) -> heads(G) end, Scope, []) - || Scope <- ?SCOPES]). + lists:foldl( + fun (Scope, Acc) -> + orddict:store(Scope, desired_version(Scope), Acc) + end, + orddict:new(), ?SCOPES). + +desired_version(Scope) -> + with_upgrade_graph(fun (G) -> heads(G) end, Scope). + +convert_old_version(Heads) -> + Locals = [add_queue_ttl], + V0 = orddict:new(), + V1 = orddict:store(mnesia, Heads -- Locals, V0), + orddict:store(local, + lists:filter(fun(H) -> lists:member(H, Locals) end, Heads), + V1). %% ------------------------------------------------------------------- upgrades_required(Scope) -> - case read_version() of + case read_version(Scope) of {ok, CurrentHeads} -> - with_upgrade_graph(fun upgrades_to_apply/2, Scope, CurrentHeads); + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> upgrades_to_apply(CurrentHeads, G); + Unknown -> throw({error, + {future_upgrades_found, Unknown}}) + end + end, Scope); {error, enoent} -> version_not_available end. 
-with_upgrade_graph(Fun, Scope, CurrentHeads) -> - G0 = make_graph(Scope), - Gs = [G0|[make_graph(S) || S <- ?SCOPES -- [Scope]]], - try - Known = lists:append([digraph:vertices(G) || G <- Gs]), - case unknown_heads(CurrentHeads, Known) of - [] -> ok; - Unknown -> throw({error, {future_upgrades_found, Unknown}}) - end, - Fun(CurrentHeads, G0) - after - [true = digraph:delete(G) || G <- Gs] - end. - -make_graph(Scope) -> +with_upgrade_graph(Fun, Scope) -> case rabbit_misc:build_acyclic_graph( fun (Module, Steps) -> vertices(Module, Steps, Scope) end, fun (Module, Steps) -> edges(Module, Steps, Scope) end, rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> - G; + {ok, G} -> try + Fun(G) + after + true = digraph:delete(G) + end; {error, {vertex, duplicate, StepName}} -> throw({error, {duplicate_upgrade_step, StepName}}); {error, {edge, {bad_vertex, StepName}, _From, _To}} -> @@ -205,12 +231,12 @@ heads(G) -> %% ------------------------------------------------------------------- -apply_upgrades(Upgrades, Fun) -> +apply_upgrades(Scope, Upgrades, Fun) -> LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of ok -> BackupDir = dir() ++ "-upgrade-backup", - info("Upgrades: ~w to apply~n", [length(Upgrades)]), + info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), case rabbit_mnesia:copy_db(BackupDir) of ok -> %% We need to make the backup after creating the @@ -219,13 +245,15 @@ apply_upgrades(Upgrades, Fun) -> %% the lock file exists in the backup too, which %% is not intuitive. Remove it. ok = file:delete(lock_filename(BackupDir)), - info("Upgrades: Mnesia dir backed up to ~p~n", [BackupDir]), + info("~s upgrades: Mnesia dir backed up to ~p~n", + [Scope, BackupDir]), Fun(), - [apply_upgrade(Upgrade) || Upgrade <- Upgrades], - info("Upgrades: All upgrades applied successfully~n", []), - ok = write_version(), + [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], + info("~s upgrades: All upgrades applied successfully~n", + [Scope]), + ok = write_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), - info("Upgrades: Mnesia backup removed~n", []), + info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); {error, E} -> %% If we can't backup, the upgrade hasn't started @@ -238,8 +266,8 @@ apply_upgrades(Upgrades, Fun) -> throw({error, previous_upgrade_failed}) end. -apply_upgrade({M, F}) -> - info("Upgrades: Applying ~w:~w~n", [M, F]), +apply_upgrade(Scope, {M, F}) -> + info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), ok = apply(M, F, []). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 2a43f15e16c5ab4c47827efc6e361b3badc69fba Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:09:19 +0000 Subject: Note that we've upgraded here --- src/rabbit_upgrade.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9d6263fe..9ce9b385 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -104,6 +104,7 @@ non_primary_upgrade(Upgrader, DiscNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), + write_version(mnesia), ok end. 
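
An illustrative aside, not part of the patch: after this change the schema_version file stores an orddict of scope to heads instead of a flat list, and convert_old_version/1 migrates the old format by treating add_queue_ttl as the only known local step. A rough sketch of the before and after shapes, using step names that appear elsewhere in this series:

convert_example() ->
    Old    = [add_queue_ttl, user_to_internal_user],   %% old flat format
    Locals = [add_queue_ttl],
    orddict:store(local,  [H || H <- Old, lists:member(H, Locals)],
                  orddict:store(mnesia, Old -- Locals, orddict:new())).
    %% result: [{local, [add_queue_ttl]}, {mnesia, [user_to_internal_user]}]
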
-- cgit v1.2.1 From 8d1365c898057bec83c45201d99c7ca8d5815e3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:11:44 +0000 Subject: Remove test upgrades --- src/rabbit_upgrade_functions.erl | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 151b498d..d2ef31b9 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -30,10 +30,6 @@ -rabbit_upgrade({internal_exchanges, mnesia, []}). -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). --rabbit_upgrade({one, mnesia, [user_to_internal_user]}). --rabbit_upgrade({two, mnesia, [one]}). --rabbit_upgrade({three, local, []}). - %% ------------------------------------------------------------------- -ifdef(use_specs). @@ -99,28 +95,6 @@ user_to_internal_user() -> end, [username, password_hash, is_admin], internal_user). - - -one() -> - mnesia( - rabbit_user, - fun ({internal_user, Username, Hash, IsAdmin}) -> - {internal_user, Username, Hash, IsAdmin, foo} - end, - [username, password_hash, is_admin, extra]). - -two() -> - mnesia( - rabbit_user, - fun ({internal_user, Username, Hash, IsAdmin, _}) -> - {internal_user, Username, Hash, IsAdmin} - end, - [username, password_hash, is_admin]). - -three() -> - ok = rabbit_misc:write_term_file(filename:join(rabbit_mnesia:dir(), "test"), - [test]). - %%-------------------------------------------------------------------- mnesia(TableName, Fun, FieldList) -> -- cgit v1.2.1 From fc4e251b01f64cc28a30bf902eb36ab68e144aaa Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 15:25:28 +0000 Subject: Minimise difference with default. --- src/rabbit_mnesia.erl | 61 +++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 26fda4e9..e63e5de2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -387,8 +387,34 @@ init_db(ClusterNodes, Force) -> {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {_, _} -> - ok = setup_existing_node(ClusterNodes, Nodes) + {[], _} -> + %% We're the first node up + ok = wait_for_tables(), + case rabbit_upgrade:maybe_upgrade(local) of + ok -> ensure_schema_ok(); + version_not_available -> schema_ok_or_move() + end; + {[AnotherNode|_], _} -> + %% Subsequent node in cluster, catch up + ensure_version_ok( + rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + ok = wait_for_tables(), + IsDiskNode = ClusterNodes == [] orelse + lists:member(node(), ClusterNodes), + ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, disc_copies), + ok = create_local_table_copies(case IsDiskNode of + true -> disc; + false -> ram + end), + case rabbit_upgrade:maybe_upgrade(local) of + ok -> ok; + %% If we're just starting up a new node we won't have + %% a version + version_not_available -> + ok = rabbit_upgrade:write_version() + end, + ensure_schema_ok() end; {error, Reason} -> %% one reason we may end up here is if we try to join @@ -397,37 +423,6 @@ init_db(ClusterNodes, Force) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. 
-setup_existing_node(ClusterNodes, Nodes) -> - case Nodes of - [] -> - %% We're the first node up - ok = wait_for_tables(), - case rabbit_upgrade:maybe_upgrade(local) of - ok -> ensure_schema_ok(); - version_not_available -> schema_ok_or_move() - end; - [AnotherNode|_] -> - %% Subsequent node in cluster, catch up - ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - ok = wait_for_tables(), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), - ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), - case rabbit_upgrade:maybe_upgrade(local) of - ok -> ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> ok = rabbit_upgrade:write_version() - end, - ensure_schema_ok() - end. - schema_ok_or_move() -> case check_schema_integrity() of ok -> -- cgit v1.2.1 From 49174fa2a3610b6158fe70744935b0bf885a1e9e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 11 Jan 2011 17:03:56 +0000 Subject: Revert this to the old version that we want. --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9ce9b385..23dd416a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -211,8 +211,8 @@ edges(_Module, Steps, Scope0) -> Require <- Requires, Scope0 == Scope1]. -unknown_heads(Heads, Known) -> - lists:filter(fun(H) -> not lists:member(H, Known) end, Heads). +unknown_heads(Heads, G) -> + [H || H <- Heads, digraph:vertex(G, H) =:= false]. upgrades_to_apply(Heads, G) -> %% Take all the vertices which can reach the known heads. That's -- cgit v1.2.1 From 02c2dd6844e132b64abef90ecfdf01c9e9124d8d Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 12 Jan 2011 11:59:09 +0000 Subject: swap union and intersection --- src/rabbit_variable_queue.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index c678236f..07297f63 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1423,8 +1423,8 @@ msgs_written_to_disk(QPid, GuidSet, written) -> msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), State #vqstate { msgs_on_disk = - gb_sets:intersection( - gb_sets:union(MOD, GuidSet), UC) }) + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) end). msg_indices_written_to_disk(QPid, GuidSet) -> @@ -1435,8 +1435,8 @@ msg_indices_written_to_disk(QPid, GuidSet) -> msgs_confirmed(gb_sets:intersection(GuidSet, MOD), State #vqstate { msg_indices_on_disk = - gb_sets:intersection( - gb_sets:union(MIOD, GuidSet), UC) }) + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) end). %%---------------------------------------------------------------------------- -- cgit v1.2.1 From d9235728acd857cea1240ab84f64dfa16bfdff54 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:01:09 +0000 Subject: rabbit_mnesia:all_clustered_nodes/0 does not return disc nodes only. Duh. But we can do better anyway: allow any disc node to do the upgrade. 
--- src/rabbit_mnesia.erl | 5 ++- src/rabbit_upgrade.erl | 99 +++++++++++++++++++++++++++++++++----------------- 2 files changed, 70 insertions(+), 34 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e63e5de2..47e68c87 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -35,7 +35,8 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1, create_cluster_nodes_config/1]). + empty_ram_only_tables/0, copy_db/1, + create_cluster_nodes_config/1, read_cluster_nodes_config/0]). -export([table_names/0]). @@ -71,6 +72,8 @@ -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). +-spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). +-spec(read_cluster_nodes_config/0 :: () -> [node()]). -endif. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 23dd416a..dcbffd03 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -22,7 +22,8 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). --export([read_version/0, write_version/0, desired_version/0]). +-export([read_version/0, write_version/0, desired_version/0, + desired_version/1]). -include("rabbit.hrl"). @@ -43,6 +44,7 @@ -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). +-spec(desired_version/1 :: (scope()) -> [step()]). -endif. @@ -52,13 +54,10 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), case upgrades_required(mnesia) of [_|_] = Upgrades -> - DiscNodes = rabbit_mnesia:all_clustered_nodes(), - Upgrader = upgrader(DiscNodes), - case node() of - Upgrader -> - primary_upgrade(Upgrades, DiscNodes); - _ -> - non_primary_upgrade(Upgrader, DiscNodes) + Nodes = rabbit_mnesia:all_clustered_nodes(), + case am_i_upgrader(Nodes) of + true -> primary_upgrade(Upgrades, Nodes); + false -> non_primary_upgrade(Nodes) end; [] -> ok; @@ -66,12 +65,57 @@ maybe_upgrade_mnesia() -> ok end. -upgrader(Nodes) -> - [Upgrader|_] = lists:usort(Nodes), - Upgrader. +am_i_upgrader(Nodes) -> + Running = nodes_running(Nodes), + case Running of + [] -> + case am_i_disc_node() of + true -> + true; + false -> + die("Cluster upgrade needed but this is a ram node.~n " + "Please start any of the disc nodes first.", []) + end; + [Another|_] -> + ClusterVersion = + case rpc:call(Another, + rabbit_upgrade, desired_version, [mnesia]) of + {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; + {badrpc, Reason} -> {unknown, Reason}; + V -> V + end, + case desired_version(mnesia) of + ClusterVersion -> + %% The other node(s) have upgraded already, I am not the + %% upgrader + false; + MyVersion -> + %% The other node(s) are running an unexpected version. + die("Cluster upgrade needed but other nodes are " + "running ~p~n" + "and I want ~p", [ClusterVersion, MyVersion]) + end + end. + +am_i_disc_node() -> + %% The cluster config does not list all disc nodes, but it will list us + %% if we're one. + case rabbit_mnesia:read_cluster_nodes_config() of + [] -> true; + DiscNodes -> lists:member(node(), DiscNodes) + end. 
-primary_upgrade(Upgrades, DiscNodes) -> - Others = DiscNodes -- [node()], +die(Msg, Args) -> + %% We don't throw or exit here since that gets thrown + %% straight out into do_boot, generating an erl_crash.dump + %% and displaying any error message in a confusing way. + error_logger:error_msg(Msg, Args), + io:format("~n~n** " ++ Msg ++ " **~n~n~n", Args), + error_logger:logfile(close), + halt(1). + +primary_upgrade(Upgrades, Nodes) -> + Others = Nodes -- [node()], apply_upgrades( mnesia, Upgrades, @@ -87,26 +131,15 @@ primary_upgrade(Upgrades, DiscNodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -non_primary_upgrade(Upgrader, DiscNodes) -> - case node_running(Upgrader) of - false -> - Msg = "~n~n * Cluster upgrade needed. Please start node ~s " - "first. * ~n~n~n", - Args = [Upgrader], - %% We don't throw or exit here since that gets thrown - %% straight out into do_boot, generating an erl_crash.dump - %% and displaying any error message in a confusing way. - error_logger:error_msg(Msg, Args), - io:format(Msg, Args), - error_logger:logfile(close), - halt(1); - true -> - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(DiscNodes), - write_version(mnesia), - ok - end. +non_primary_upgrade(Nodes) -> + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema), + ok = rabbit_mnesia:create_cluster_nodes_config(Nodes), + write_version(mnesia), + ok. + +nodes_running(Nodes) -> + [N || N <- Nodes, node_running(N)]. node_running(Node) -> case rpc:call(Node, application, which_applications, []) of -- cgit v1.2.1 From 81fd88b601cfa099f052f4270317248c6f870e72 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:43:25 +0000 Subject: Remove pointless differences from default. --- src/rabbit_mnesia.erl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47e68c87..6523a036 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -44,8 +44,6 @@ %% other mnesia-using Erlang applications, such as ejabberd -export([create_tables/0]). --define(EXAMPLE_RABBIT_TABLE, rabbit_durable_exchange). - -include("rabbit.hrl"). %%---------------------------------------------------------------------------- @@ -164,7 +162,7 @@ nodes_of_type(Type) -> %% Specifically, we check whether a certain table, which we know %% will be written to disk on a disc node, is stored on disk or in %% RAM. - mnesia:table_info(?EXAMPLE_RABBIT_TABLE, Type). + mnesia:table_info(rabbit_durable_exchange, Type). table_definitions() -> [{rabbit_user, @@ -401,7 +399,6 @@ init_db(ClusterNodes, Force) -> %% Subsequent node in cluster, catch up ensure_version_ok( rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), - ok = wait_for_tables(), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), -- cgit v1.2.1 From d19649eec7e0eb34fbf16b906d36a713b9737c5b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 12:54:12 +0000 Subject: Detect old-style versions properly. 
--- src/rabbit_upgrade.erl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dcbffd03..a570df4a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -159,9 +159,9 @@ maybe_upgrade(Scope) -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case orddict:find(mnesia, V) of - error -> {ok, convert_old_version(V)}; - _ -> {ok, V} + {ok, [V]} -> case is_orddict(V) of + false -> {ok, convert_old_version(V)}; + true -> {ok, V} end; {error, _} = Err -> Err end. @@ -315,3 +315,9 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). + +%% This doesn't check it's ordered but that's not needed for our purposes +is_orddict(Thing) -> + is_list(Thing) andalso + lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, + Thing). -- cgit v1.2.1 From 87e8c18729974033ecef50a6b91b336e04189a15 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 13:05:53 +0000 Subject: And fix again. --- src/rabbit_upgrade.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index a570df4a..4bf8d661 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -159,7 +159,7 @@ maybe_upgrade(Scope) -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case is_orddict(V) of + {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; true -> {ok, V} end; @@ -316,8 +316,8 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). -%% This doesn't check it's ordered but that's not needed for our purposes -is_orddict(Thing) -> - is_list(Thing) andalso +is_new_version(Version) -> + is_list(Version) andalso + length(Version) > 0 andalso lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, - Thing). + Version). -- cgit v1.2.1 From 3821445acd31339a98af2ab0508f092ec06332d2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 13:31:41 +0000 Subject: Don't display a cluster-related message on a single node. --- src/rabbit_upgrade.erl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 4bf8d661..2c4dad87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -120,11 +120,16 @@ primary_upgrade(Upgrades, Nodes) -> mnesia, Upgrades, fun () -> - info("mnesia upgrades: Breaking cluster~n", []), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] + case Others of + [] -> + ok; + _ -> + info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end end), ok. -- cgit v1.2.1 From e45219e2eea0ef94646518a122dedf6f39fadc2f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 14:03:49 +0000 Subject: Break the bad news rather than just timing out wait_for_tables as we traditionally have done. 
--- src/rabbit_upgrade.erl | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 2c4dad87..53ed99d3 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -52,9 +52,9 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), + Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of [_|_] = Upgrades -> - Nodes = rabbit_mnesia:all_clustered_nodes(), case am_i_upgrader(Nodes) of true -> primary_upgrade(Upgrades, Nodes); false -> non_primary_upgrade(Nodes) @@ -62,7 +62,15 @@ maybe_upgrade_mnesia() -> [] -> ok; version_not_available -> - ok + case Nodes of + [_] -> + ok; + _ -> + die("Cluster upgrade needed but upgrading from < 2.1.1.~n" + " Unfortunately you will need to rebuild the " + "cluster.", + []) + end end. am_i_upgrader(Nodes) -> -- cgit v1.2.1 From 90d3914c6aab0b510c42000d00615d5c51ec4345 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 12 Jan 2011 14:43:32 +0000 Subject: Cosmetic. --- src/rabbit_mnesia.erl | 3 ++- src/rabbit_upgrade.erl | 34 ++++++++++++++-------------------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 6523a036..ee6ede35 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -408,7 +408,8 @@ init_db(ClusterNodes, Force) -> false -> ram end), case rabbit_upgrade:maybe_upgrade(local) of - ok -> ok; + ok -> + ok; %% If we're just starting up a new node we won't have %% a version version_not_available -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 53ed99d3..6df881fd 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -63,13 +63,10 @@ maybe_upgrade_mnesia() -> ok; version_not_available -> case Nodes of - [_] -> - ok; - _ -> - die("Cluster upgrade needed but upgrading from < 2.1.1.~n" - " Unfortunately you will need to rebuild the " - "cluster.", - []) + [_] -> ok; + _ -> die("Cluster upgrade needed but upgrading from " + "< 2.1.1.~n Unfortunately you will need to " + "rebuild the cluster.", []) end end. @@ -78,11 +75,10 @@ am_i_upgrader(Nodes) -> case Running of [] -> case am_i_disc_node() of - true -> - true; - false -> - die("Cluster upgrade needed but this is a ram node.~n " - "Please start any of the disc nodes first.", []) + true -> true; + false -> die("Cluster upgrade needed but this is a ram " + "node.~n Please start any of the disc nodes " + "first.", []) end; [Another|_] -> ClusterVersion = @@ -100,8 +96,8 @@ am_i_upgrader(Nodes) -> MyVersion -> %% The other node(s) are running an unexpected version. die("Cluster upgrade needed but other nodes are " - "running ~p~n" - "and I want ~p", [ClusterVersion, MyVersion]) + "running ~p~nand I want ~p", + [ClusterVersion, MyVersion]) end end. @@ -131,12 +127,10 @@ primary_upgrade(Upgrades, Nodes) -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), case Others of - [] -> - ok; - _ -> - info("mnesia upgrades: Breaking cluster~n", []), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] + [] -> ok; + _ -> info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] end end), ok. 
-- cgit v1.2.1 From b88381ac6a1ea6badf70f0eec7384f9beb7a09bf Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 19 Jan 2011 12:49:30 +0000 Subject: Sender-specified distribution for fanout exchanges --- codegen.py | 2 +- include/rabbit.hrl | 3 +++ src/rabbit_exchange.erl | 26 +++++++++++++++++++++++++- src/rabbit_exchange_type_direct.erl | 11 +++-------- src/rabbit_exchange_type_fanout.erl | 10 ++++++++-- src/rabbit_misc.erl | 11 +---------- src/rabbit_router.erl | 5 +++-- 7 files changed, 44 insertions(+), 24 deletions(-) diff --git a/codegen.py b/codegen.py index 979c5bd8..6e9139b8 100644 --- a/codegen.py +++ b/codegen.py @@ -354,7 +354,7 @@ def genErl(spec): -type(amqp_field_type() :: 'longstr' | 'signedint' | 'decimal' | 'timestamp' | 'table' | 'byte' | 'double' | 'float' | 'long' | - 'short' | 'bool' | 'binary' | 'void'). + 'short' | 'bool' | 'binary' | 'void' | 'array'). -type(amqp_property_type() :: 'shortstr' | 'longstr' | 'octet' | 'shortint' | 'longint' | 'longlongint' | 'timestamp' | 'bit' | 'table'). diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 81c3996b..5c5fad76 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -96,6 +96,9 @@ -define(DESIRED_HIBERNATE, 10000). -define(STATS_INTERVAL, 5000). +-define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]). +-define(DELETED_HEADER, <<"BCC">>). + -ifdef(debug). -define(LOGDEBUG0(F), rabbit_log:debug(F)). -define(LOGDEBUG(F,A), rabbit_log:debug(F,A)). diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a95cf0b1..d9e3431d 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -35,6 +35,7 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). +-export([header_routes/2]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -86,7 +87,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). - +-spec(header_routes/2 :: (rabbit_framing:amqp_table(), rabbit_types:vhost()) -> + [rabbit_types:r('queue')]). -endif. %%---------------------------------------------------------------------------- @@ -319,3 +321,25 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. + +header_routes(undefined, _VHost) -> + []; +header_routes(Headers, VHost) -> + [rabbit_misc:r(VHost, queue, RKey) || + RKey <- lists:flatten([routing_keys(Headers, Header) || + Header <- ?ROUTING_HEADERS])]. + +routing_keys(HeadersTable, Key) -> + case rabbit_misc:table_lookup(HeadersTable, Key) of + {longstr, Route} -> [Route]; + {array, Routes} -> rkeys(Routes, []); + _ -> [] + end. + +rkeys([{longstr, BinVal} | Rest], RKeys) -> + rkeys(Rest, [BinVal | RKeys]); +rkeys([{_, _} | Rest], RKeys) -> + rkeys(Rest, RKeys); +rkeys(_, RKeys) -> + RKeys. 
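
An illustrative aside, not part of the patch: header_routes/2 above turns the CC and BCC headers of a published message into extra queue destinations, accepting either a longstr naming one queue or an array of longstrs naming several. AMQP tables are lists of {Name, Type, Value} triples, so a publisher-supplied headers table might look like the following (queue names invented):

example_headers() ->
    [{<<"CC">>,  array,   [{longstr, <<"audit">>}, {longstr, <<"metrics">>}]},
     {<<"BCC">>, longstr, <<"debug">>}].

%% rabbit_exchange:header_routes(example_headers(), <<"/">>) would yield queue
%% resources for <<"audit">>, <<"metrics">> and <<"debug">> in vhost <<"/">>;
%% the BCC header itself is later stripped from the delivery (?DELETED_HEADER).
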
+ diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index ab688853..9547117c 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -55,14 +55,9 @@ route(#exchange{name = #resource{virtual_host = VHost} = Name}, #delivery{message = #basic_message{routing_key = RoutingKey, content = Content}}) -> BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), - HeaderRKeys = - case (Content#content.properties)#'P_basic'.headers of - undefined -> []; - Headers -> rabbit_misc:table_lookup(Headers, <<"CC">>, <<0>>) ++ - rabbit_misc:table_lookup(Headers, <<"BCC">>, <<0>>) - end, - HeaderRoutes = [rabbit_misc:r(VHost, queue, RKey) || RKey <- HeaderRKeys], - lists:usort(BindingRoutes ++ HeaderRoutes). + HeaderRoutes = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers, VHost), + BindingRoutes ++ HeaderRoutes. validate(_X) -> ok. create(_X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index e7f75464..e9faf0a2 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -31,6 +31,7 @@ -module(rabbit_exchange_type_fanout). -include("rabbit.hrl"). +-include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -50,8 +51,13 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. -route(#exchange{name = Name}, _Delivery) -> - rabbit_router:match_routing_key(Name, '_'). +route(#exchange{name = #resource{virtual_host = VHost} = Name}, + #delivery{message = #basic_message{content = Content}}) -> + BindingRoutes = rabbit_router:match_routing_key(Name, '_'), + HeaderRoutes = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers, VHost), + BindingRoutes ++ HeaderRoutes. + validate(_X) -> ok. create(_X) -> ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 604346ed..15ba787a 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,7 +40,7 @@ protocol_error/3, protocol_error/4, protocol_error/1]). -export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). --export([table_lookup/3, table_lookup/2]). +-export([table_lookup/2]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). @@ -112,8 +112,6 @@ 'ok' | rabbit_types:connection_exit()). -spec(dirty_read/1 :: ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). --spec(table_lookup/3 :: - (rabbit_framing:amqp_table(), binary(), binary()) -> [binary()]). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). @@ -255,13 +253,6 @@ dirty_read(ReadSpec) -> [] -> {error, not_found} end. -table_lookup(Table, Key, Separator) -> - case table_lookup(Table, Key) of - undefined -> []; - {longstr, BinVal} -> binary:split(BinVal, Separator, [global]); - _ -> [] - end. - table_lookup(Table, Key) -> case lists:keysearch(Key, 1, Table) of {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin}; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 2f556df7..7f9b823e 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -69,7 +69,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. 
QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, <<"BCC">>), + ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), delegate:invoke_no_result( QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), {routed, QPids}; @@ -77,7 +77,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, <<"BCC">>), + ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), {Success, _} = delegate:invoke(QPids, fun (Pid) -> @@ -87,6 +87,7 @@ deliver(QNames, Delivery = #delivery{mandatory = Mandatory, lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). +%% This breaks the spec rule forbidding message modification strip_header(Delivery = #delivery{message = Message = #basic_message{ content = Content = #content{ properties = Props = #'P_basic'{headers = Headers}}}}, -- cgit v1.2.1 From 69f35fc60d84b1ffe4424dfe7d47f909bec8e423 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 19 Jan 2011 14:38:43 +0000 Subject: replace the sort with a gb_tree Instead of creating a list and sorting it, insert the MsgSeqNos into a gb_tree. Dicts and orddicts are slower. --- src/rabbit_amqqueue_process.erl | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 38b83117..b0aea012 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -431,27 +431,22 @@ confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> fun(Guid, {CMs, GTC0}) -> case dict:find(Guid, GTC0) of {ok, {ChPid, MsgSeqNo}} -> - {[{ChPid, MsgSeqNo} | CMs], dict:erase(Guid, GTC0)}; + {gb_trees_cons(ChPid, MsgSeqNo, CMs), + dict:erase(Guid, GTC0)}; _ -> {CMs, GTC0} end - end, {[], GTC}, Guids), - case lists:usort(CMs) of - [{Ch, MsgSeqNo} | CMs1] -> - [rabbit_channel:confirm(ChPid, MsgSeqNos) || - {ChPid, MsgSeqNos} <- group_confirms_by_channel( - CMs1, [{Ch, [MsgSeqNo]}])]; - [] -> - ok - end, + end, {gb_trees:empty(), GTC}, Guids), + gb_trees:map(fun(ChPid, MsgSeqNos) -> + rabbit_channel:confirm(ChPid, MsgSeqNos) + end, CMs), State#q{guid_to_channel = GTC1}. -group_confirms_by_channel([], Acc) -> - Acc; -group_confirms_by_channel([{Ch, Msg1} | CMs], [{Ch, Msgs} | Acc]) -> - group_confirms_by_channel(CMs, [{Ch, [Msg1 | Msgs]} | Acc]); -group_confirms_by_channel([{Ch, Msg1} | CMs], Acc) -> - group_confirms_by_channel(CMs, [{Ch, [Msg1]} | Acc]). +gb_trees_cons(Key, Value, Tree) -> + case gb_trees:lookup(Key, Tree) of + {value, Values} -> gb_trees:update(Key, [Value | Values], Tree); + none -> gb_trees:insert(Key, [Value], Tree) + end. 
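The rewritten confirm_messages/2 above folds each guid's {ChPid, MsgSeqNo} pair into a gb_tree keyed by channel pid, so the per-channel grouping that previously needed lists:usort/1 plus group_confirms_by_channel/2 falls out of the tree directly. A minimal sketch of that grouping, assuming the gb_trees_cons/3 helper above is in scope (ch_a and ch_b stand in for channel pids; the sequence numbers are made up):

    group_confirms_example() ->
        Pairs = [{ch_a, 5}, {ch_b, 9}, {ch_a, 7}],
        Tree  = lists:foldl(fun ({ChPid, MsgSeqNo}, T) ->
                                    gb_trees_cons(ChPid, MsgSeqNo, T)
                            end, gb_trees:empty(), Pairs),
        gb_trees:to_list(Tree).
        %% => [{ch_a, [7, 5]}, {ch_b, [9]}] -- one entry per channel, ready
        %% to be handed to rabbit_channel:confirm/2 via gb_trees:map/2.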
record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> {no_confirm, State}; -- cgit v1.2.1 From 73da7e693ef652858bc348bd340bc2df9d3440ef Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 19 Jan 2011 18:25:31 +0000 Subject: fixing merge conflicts; some stylistic adjustments --- src/rabbit_exchange_type_topic.erl | 149 +++++++++++++++++++------------------ src/rabbit_tests.erl | 76 +++++++++++-------- 2 files changed, 121 insertions(+), 104 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2da3f3ee..2e181f1d 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -48,27 +48,28 @@ validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. -delete(_Tx, #exchange{name = X}, _Bs) -> - rabbit_misc:execute_mnesia_transaction(fun () -> trie_remove_all_edges(X), - trie_remove_all_bindings(X) - end), +delete(true, #exchange{name = X}, _Bs) -> + trie_remove_all_edges(X), + trie_remove_all_bindings(X), + ok; +delete(false, _Exchange, _Bs) -> ok. -add_binding(_Tx, _Exchange, #binding{source = X, key = K, destination = D}) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> FinalNode = follow_down_create(X, split_topic_key(K)), - trie_add_binding(X, FinalNode, D) - end), +add_binding(true, _Exchange, #binding{source = X, key = K, destination = D}) -> + FinalNode = follow_down_create(X, split_topic_key(K)), + trie_add_binding(X, FinalNode, D), + ok; +add_binding(false, _Exchange, _Binding) -> ok. -remove_bindings(_Tx, _X, Bs) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> lists:foreach(fun remove_binding/1, Bs) end), +remove_bindings(true, _X, Bs) -> + lists:foreach(fun remove_binding/1, Bs), + ok; +remove_bindings(false, _X, _Bs) -> ok. remove_binding(#binding{source = X, key = K, destination = D}) -> - Path = follow_down_get_path(X, split_topic_key(K)), - {FinalNode, _} = hd(Path), + Path = [{FinalNode, _} | _] = follow_down_get_path(X, split_topic_key(K)), trie_remove_binding(X, FinalNode, D), remove_path_if_empty(X, Path), ok. @@ -108,19 +109,8 @@ trie_match_skip_any(X, Node, []) -> trie_match_skip_any(X, Node, [_ | RestW] = Words) -> trie_match(X, Node, Words) ++ trie_match_skip_any(X, Node, RestW). -follow_down(X, Words) -> - follow_down(X, root, Words). - -follow_down(_X, CurNode, []) -> - {ok, CurNode}; -follow_down(X, CurNode, [W | RestW]) -> - case trie_child(X, CurNode, W) of - {ok, NextNode} -> follow_down(X, NextNode, RestW); - error -> {error, CurNode, [W | RestW]} - end. - follow_down_create(X, Words) -> - case follow_down(X, Words) of + case follow_down_last_node(X, Words) of {ok, FinalNode} -> FinalNode; {error, Node, RestW} -> lists:foldl( fun (W, CurNode) -> @@ -130,14 +120,26 @@ follow_down_create(X, Words) -> end, Node, RestW) end. +follow_down_last_node(X, Words) -> + follow_down(X, fun (_, Node, _) -> Node end, root, Words). + follow_down_get_path(X, Words) -> - follow_down_get_path(X, root, Words, [{root, none}]). + {ok, Path} = + follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end, + [{root, none}], Words), + Path. + +follow_down(X, AccFun, Acc0, Words) -> + follow_down(X, root, AccFun, Acc0, Words). -follow_down_get_path(_, _, [], PathAcc) -> - PathAcc; -follow_down_get_path(X, CurNode, [W | RestW], PathAcc) -> - {ok, NextNode} = trie_child(X, CurNode, W), - follow_down_get_path(X, NextNode, RestW, [{NextNode, W} | PathAcc]). 
+follow_down(_X, _CurNode, _AccFun, Acc, []) -> + {ok, Acc}; +follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) -> + case trie_child(X, CurNode, W) of + {ok, NextNode} -> follow_down(X, NextNode, AccFun, + AccFun(W, NextNode, Acc), RestW); + error -> {error, Acc, Words} + end. remove_path_if_empty(_, [{root, none}]) -> ok; @@ -149,9 +151,10 @@ remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> end. trie_child(X, Node, Word) -> - case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, - node_id = Node, - word = Word}) of + case mnesia:read(rabbit_topic_trie_edge, + #trie_edge{exchange_name = X, + node_id = Node, + word = Word}) of [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; [] -> error end. @@ -159,8 +162,8 @@ trie_child(X, Node, Word) -> trie_bindings(X, Node) -> MatchHead = #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = '$1'}}, + node_id = Node, + destination = '$1'}}, mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]). trie_add_edge(X, FromNode, ToNode, W) -> @@ -172,9 +175,9 @@ trie_remove_edge(X, FromNode, ToNode, W) -> trie_edge_op(X, FromNode, ToNode, W, Op) -> ok = Op(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - node_id = FromNode, - word = W}, - node_id = ToNode}, + node_id = FromNode, + word = W}, + node_id = ToNode}, write). trie_add_binding(X, Node, D) -> @@ -185,28 +188,41 @@ trie_remove_binding(X, Node, D) -> trie_binding_op(X, Node, D, Op) -> ok = Op(rabbit_topic_trie_binding, - #topic_trie_binding{trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = D}}, + #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + destination = D}}, write). trie_has_any_children(X, Node) -> - MatchHead = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - node_id = Node, - _='_'}, - _='_'}, - Select = mnesia:select(rabbit_topic_trie_edge, - [{MatchHead, [], ['$_']}], 1, read), - select_while_no_result(Select) /= '$end_of_table'. + has_any(rabbit_topic_trie_edge, + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + node_id = Node, + _ = '_'}, + _ = '_'}). trie_has_any_bindings(X, Node) -> - MatchHead = #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - _='_'}, - _='_'}, - Select = mnesia:select(rabbit_topic_trie_binding, - [{MatchHead, [], ['$_']}], 1, read), + has_any(rabbit_topic_trie_binding, + #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + _ = '_'}, + _ = '_'}). + +trie_remove_all_edges(X) -> + remove_all(rabbit_topic_trie_edge, + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + _ = '_'}, + _ = '_'}). + +trie_remove_all_bindings(X) -> + remove_all(rabbit_topic_trie_binding, + #topic_trie_binding{ + trie_binding = #trie_binding{exchange_name = X, _ = '_'}, + _ = '_'}). + +has_any(Table, MatchHead) -> + Select = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read), select_while_no_result(Select) /= '$end_of_table'. select_while_no_result({[], Cont}) -> @@ -214,21 +230,9 @@ select_while_no_result({[], Cont}) -> select_while_no_result(Other) -> Other. -trie_remove_all_edges(X) -> - Pattern = #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - _='_'}, - _='_'}, - lists:foreach( - fun (R) -> mnesia:delete_object(rabbit_topic_trie_edge, R, write) end, - mnesia:match_object(rabbit_topic_trie_edge, Pattern, write)). 
- -trie_remove_all_bindings(X) -> - Pattern = #topic_trie_binding{trie_binding = #trie_binding{exchange_name =X, - _='_'}, - _='_'}, - lists:foreach( - fun (R) -> mnesia:delete_object(rabbit_topic_trie_binding, R, write) end, - mnesia:match_object(rabbit_topic_trie_binding, Pattern, write)). +remove_all(Table, Pattern) -> + lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end, + mnesia:match_object(Table, Pattern, write)). new_node_id() -> rabbit_guid:guid(). @@ -244,3 +248,4 @@ split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) -> split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]); split_topic_key(<>, RevWordAcc, RevResAcc) -> split_topic_key(Rest, [C | RevWordAcc], RevResAcc). + diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b80f3692..32cdaa52 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -588,7 +588,7 @@ test_topic_matching() -> auto_delete = false, arguments = []}, %% create rabbit_exchange_type_topic:validate(X), - rabbit_exchange_type_topic:create(X), + exchange_op_callback(X, create, []), %% add some bindings Bindings = lists:map( @@ -624,60 +624,72 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]), - lists:foreach(fun (B) -> rabbit_exchange_type_topic:add_binding(X, B) end, + lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), %% test some matches test_topic_expect_match(X, - [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", "t18", "t20", - "t21", "t22", "t23", "t24", "t26"]}, - {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", "t12", "t15", - "t21", "t22", "t23", "t24", "t26"]}, - {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", "t18", "t21", - "t22", "t23", "t24", "t26"]}, - {"", ["t5", "t6", "t17", "t24"]}, - {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", "t24"]}, - {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", "t24"]}, - {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", - "t24"]}, - {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", "t23", - "t24", "t26"]}, + [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", + "t18", "t20", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", + "t12", "t15", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", + "t18", "t21", "t22", "t23", "t24", "t26"]}, + {"", ["t5", "t6", "t17", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", + "t24"]}, + {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", + "t24"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", + "t24"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", + "t23", "t24", "t26"]}, {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, - {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", "t25"]}]), + {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", + "t25"]}]), %% remove some bindings RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), lists:nth(11, Bindings), lists:nth(19, Bindings), lists:nth(21, Bindings)], - rabbit_exchange_type_topic:remove_bindings(X, RemovedBindings), + exchange_op_callback(X, remove_bindings, [RemovedBindings]), RemainingBindings = ordsets:to_list( ordsets:subtract(ordsets:from_list(Bindings), ordsets:from_list(RemovedBindings))), %% test some matches 
test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", "t23", - "t24", "t26"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", "t22", "t23", - "t24", "t26"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", "t23", - "t24", "t26"]}, - {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, - {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", "t24", "t26"]}, + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, - {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange - rabbit_exchange_type_topic:delete(X, RemainingBindings), + exchange_op_callback(X, delete, [RemainingBindings]), %% none should match now test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. +exchange_op_callback(X, Fun, ExtraArgs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), + rabbit_exchange:callback(X, Fun, [false, X] ++ ExtraArgs). 
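exchange_op_callback/3 above reflects the new two-phase exchange-type callbacks exercised in this patch: each callback is invoked once with true inside a Mnesia transaction (where the topic exchange performs its trie writes) and once with false after it. As a sketch, adding a single binding through the same path could look like this (add_one_binding is a hypothetical name; X and B are records of the kind built in test_topic_matching/0):

    add_one_binding(X, B) ->
        rabbit_misc:execute_mnesia_transaction(
          fun () -> rabbit_exchange:callback(X, add_binding, [true, X, B]) end),
        rabbit_exchange:callback(X, add_binding, [false, X, B]).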
+ test_topic_expect_match(X, List) -> lists:foreach( fun ({Key, Expected}) -> -- cgit v1.2.1 From 7d69905fb40fcfa8a9657b951c1a1cde058be2d4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 20 Jan 2011 14:18:38 +0000 Subject: Correct formatting of resources in log messages --- src/rabbit_mirror_queue_coordinator.erl | 12 ++++++------ src/rabbit_mirror_queue_slave.erl | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 608148b5..d853a3e9 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -74,19 +74,19 @@ handle_cast({add_slave, Node}, State = #state { q = Q }) -> case lists:member(Node, Nodes) of true -> Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), - rabbit_log:info("Adding slave node for queue ~p: ~p~n", - [Q #amqqueue.name, Result]); + rabbit_log:info("Adding slave node for ~p: ~p~n", + [rabbit_misc:rs(Q #amqqueue.name), Result]); false -> rabbit_log:info( - "Ignoring request to add slave on node ~p for queue ~p~n", - [Q #amqqueue.name, Node]) + "Ignoring request to add slave on node ~p for ~p~n", + [Node, rabbit_misc:rs(Q #amqqueue.name)]) end, noreply(State); handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> - rabbit_log:info("Master ~p saw deaths ~p for queue ~p~n", - [self(), Deaths, QueueName]), + rabbit_log:info("Master ~p saw deaths ~p for ~p~n", + [self(), Deaths, rabbit_misc:rs(QueueName)]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= node() -> noreply(State); diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index a61cea0d..483c849a 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -164,8 +164,8 @@ handle_call({gm_deaths, Deaths}, From, State = #state { q = #amqqueue { name = QueueName }, gm = GM, master_node = MNode }) -> - rabbit_log:info("Slave ~p saw deaths ~p for queue ~p~n", - [self(), Deaths, QueueName]), + rabbit_log:info("Slave ~p saw deaths ~p for ~p~n", + [self(), Deaths, rabbit_misc:rs(QueueName)]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= MNode -> reply(ok, State); @@ -325,8 +325,8 @@ promote_me(From, #state { q = Q, sender_queues = SQ, seen = Seen, guid_ack = GA }) -> - rabbit_log:info("Promoting slave ~p for queue ~p~n", - [self(), Q #amqqueue.name]), + rabbit_log:info("Promoting slave ~p for ~p~n", + [self(), rabbit_misc:rs(Q #amqqueue.name)]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), -- cgit v1.2.1 From 844093df9d5a32552b70dd51640551bfcd19190e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 20 Jan 2011 14:24:43 +0000 Subject: And the result of misc:rs is a string, so format it as such --- src/rabbit_mirror_queue_coordinator.erl | 6 +++--- src/rabbit_mirror_queue_slave.erl | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index d853a3e9..30fd6ed3 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -74,18 +74,18 @@ handle_cast({add_slave, Node}, State = #state { q = Q }) -> case lists:member(Node, Nodes) of true -> Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), - rabbit_log:info("Adding slave node 
for ~p: ~p~n", + rabbit_log:info("Adding slave node for ~s: ~p~n", [rabbit_misc:rs(Q #amqqueue.name), Result]); false -> rabbit_log:info( - "Ignoring request to add slave on node ~p for ~p~n", + "Ignoring request to add slave on node ~p for ~s~n", [Node, rabbit_misc:rs(Q #amqqueue.name)]) end, noreply(State); handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> - rabbit_log:info("Master ~p saw deaths ~p for ~p~n", + rabbit_log:info("Master ~p saw deaths ~p for ~s~n", [self(), Deaths, rabbit_misc:rs(QueueName)]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= node() -> diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 483c849a..4f9d2066 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -164,7 +164,7 @@ handle_call({gm_deaths, Deaths}, From, State = #state { q = #amqqueue { name = QueueName }, gm = GM, master_node = MNode }) -> - rabbit_log:info("Slave ~p saw deaths ~p for ~p~n", + rabbit_log:info("Slave ~p saw deaths ~p for ~s~n", [self(), Deaths, rabbit_misc:rs(QueueName)]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= MNode -> @@ -325,7 +325,7 @@ promote_me(From, #state { q = Q, sender_queues = SQ, seen = Seen, guid_ack = GA }) -> - rabbit_log:info("Promoting slave ~p for ~p~n", + rabbit_log:info("Promoting slave ~p for ~s~n", [self(), rabbit_misc:rs(Q #amqqueue.name)]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), -- cgit v1.2.1 From 0b35d977d92af97c5c0d36ef890f2a4ac9a48881 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 21 Jan 2011 12:06:49 +0000 Subject: Adding gm related files, plucked from branch bug23554 --- include/gm_specs.hrl | 28 ++ src/gm.erl | 1308 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/gm_test.erl | 126 +++++ 3 files changed, 1462 insertions(+) create mode 100644 include/gm_specs.hrl create mode 100644 src/gm.erl create mode 100644 src/gm_test.erl diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl new file mode 100644 index 00000000..7f607755 --- /dev/null +++ b/include/gm_specs.hrl @@ -0,0 +1,28 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-ifdef(use_specs). + +-type(callback_result() :: 'ok' | {'stop', any()}). +-type(args() :: [any()]). +-type(members() :: [pid()]). + +-spec(joined/2 :: (args(), members()) -> callback_result()). +-spec(members_changed/3 :: (args(), members(), members()) -> callback_result()). +-spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()). +-spec(terminate/2 :: (args(), term()) -> any()). + +-endif. 
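The specs above cover the four callbacks a gm member module must export; they mirror behaviour_info(callbacks) in gm.erl below. As a sketch, a minimal callback module that just prints what it observes could look like this (the module name gm_echo is made up):

    -module(gm_echo).
    -behaviour(gm).

    -export([joined/2, members_changed/3, handle_msg/3, terminate/2]).

    %% Args is the argument list given to gm:start_link/3.
    joined(_Args, Members) ->
        io:format("joined; current members: ~p~n", [Members]),
        ok.

    members_changed(_Args, Births, Deaths) ->
        io:format("births: ~p, deaths: ~p~n", [Births, Deaths]),
        ok.

    handle_msg(_Args, From, Msg) ->
        io:format("~p sent ~p~n", [From, Msg]),
        ok.

    terminate(_Args, Reason) ->
        io:format("terminating: ~p~n", [Reason]),
        ok.

Driving it (happy path only; Mnesia must already be running, and the group name is arbitrary):

    ok = gm:create_tables(),
    {ok, M} = gm:start_link(test_group, gm_echo, []),
    ok = gm:broadcast(M, ping),
    Members = gm:group_members(M),
    ok = gm:leave(M).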
diff --git a/src/gm.erl b/src/gm.erl new file mode 100644 index 00000000..baf46471 --- /dev/null +++ b/src/gm.erl @@ -0,0 +1,1308 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm). + +%% Guaranteed Multicast +%% ==================== +%% +%% This module provides the ability to create named groups of +%% processes to which members can be dynamically added and removed, +%% and for messages to be broadcast within the group that are +%% guaranteed to reach all members of the group during the lifetime of +%% the message. The lifetime of a message is defined as being, at a +%% minimum, the time from which the message is first sent to any +%% member of the group, up until the time at which it is known by the +%% member who published the message that the message has reached all +%% group members. +%% +%% The guarantee given is that provided a message, once sent, makes it +%% to members who do not all leave the group, the message will +%% continue to propagate to all group members. +%% +%% Another way of stating the guarantee is that if member P publishes +%% messages m and m', then for all members P', if P' is a member of +%% the group prior to the publication of m, and P' receives m', then +%% P' will receive m. +%% +%% Note that only local-ordering is enforced: i.e. if member P sends +%% message m and then message m', then for-all members P', if P' +%% receives m and m', then they will receive m' after m. Causality +%% ordering is _not_ enforced. I.e. if member P receives message m +%% and as a result publishes message m', there is no guarantee that +%% other members P' will receive m before m'. +%% +%% +%% API Use +%% ------- +%% +%% Mnesia must be started. Use the idempotent create_tables/0 function +%% to create the tables required. +%% +%% start_link/3 +%% Provide the group name, the callback module name, and a list of any +%% arguments you wish to be passed into the callback module's +%% functions. The joined/1 will be called when we have joined the +%% group, and the list of arguments will have appended to it a list of +%% the current members of the group. See the comments in +%% behaviour_info/1 below for further details of the callback +%% functions. +%% +%% leave/1 +%% Provide the Pid. Removes the Pid from the group. The callback +%% terminate/1 function will be called. +%% +%% broadcast/2 +%% Provide the Pid and a Message. The message will be sent to all +%% members of the group as per the guarantees given above. This is a +%% cast and the function call will return immediately. There is no +%% guarantee that the message will reach any member of the group. +%% +%% confirmed_broadcast/2 +%% Provide the Pid and a Message. As per broadcast/2 except that this +%% is a call, not a cast, and only returns 'ok' once the Message has +%% reached every member of the group. 
Do not call +%% confirmed_broadcast/2 directly from the callback module otherwise +%% you will deadlock the entire group. +%% +%% group_members/1 +%% Provide the Pid. Returns a list of the current group members. +%% +%% +%% Implementation Overview +%% ----------------------- +%% +%% One possible means of implementation would be a fan-out from the +%% sender to every member of the group. This would require that the +%% group is fully connected, and, in the event that the original +%% sender of the message disappears from the group before the message +%% has made it to every member of the group, raises questions as to +%% who is responsible for sending on the message to new group members. +%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] - +%% if the sender dies part way through, who is responsible for +%% ensuring that the remaining Members receive the Msg? In the event +%% that within the group, messages sent are broadcast from a subset of +%% the members, the fan-out arrangement has the potential to +%% substantially impact the CPU and network workload of such members, +%% as such members would have to accommodate the cost of sending each +%% message to every group member. +%% +%% Instead, if the members of the group are arranged in a chain, then +%% it becomes easier to reason about who within the group has received +%% each message and who has not. It eases issues of responsibility: in +%% the event of a group member disappearing, the nearest upstream +%% member of the chain is responsible for ensuring that messages +%% continue to propagate down the chain. It also results in equal +%% distribution of sending and receiving workload, even if all +%% messages are being sent from just a single group member. This +%% configuration has the further advantage that it is not necessary +%% for every group member to know of every other group member, and +%% even that a group member does not have to be accessible from all +%% other group members. +%% +%% Performance is kept high by permitting pipelining and all +%% communication between joined group members is asynchronous. In the +%% chain A -> B -> C -> D, if A sends a message to the group, it will +%% not directly contact C or D. However, it must know that D receives +%% the message (in addition to B and C) before it can consider the +%% message fully sent. A simplistic implementation would require that +%% D replies to C, C replies to B and B then replies to A. This would +%% result in a propagation delay of twice the length of the chain. It +%% would also require, in the event of the failure of C, that D knows +%% to directly contact B and issue the necessary replies. Instead, the +%% chain forms a ring: D sends the message on to A: D does not +%% distinguish A as the sender, merely as the next member (downstream) +%% within the chain (which has now become a ring). When A receives +%% from D messages that A sent, it knows that all members have +%% received the message. However, the message is not dead yet: if C +%% died as B was sending to C, then B would need to detect the death +%% of C and forward the message on to D instead: thus every node has +%% to remember every message published until it is told that it can +%% forget about the message. This is essential not just for dealing +%% with failure of members, but also for the addition of new members. +%% +%% Thus once A receives the message back again, it then sends to B an +%% acknowledgement for the message, indicating that B can now forget +%% about the message. 
B does so, and forwards the ack to C. C forgets +%% the message, and forwards the ack to D, which forgets the message +%% and finally forwards the ack back to A. At this point, A takes no +%% further action: the message and its acknowledgement have made it to +%% every member of the group. The message is now dead, and any new +%% member joining the group at this point will not receive the +%% message. +%% +%% We therefore have two roles: +%% +%% 1. The sender, who upon receiving their own messages back, must +%% then send out acknowledgements, and upon receiving their own +%% acknowledgements back perform no further action. +%% +%% 2. The other group members who upon receiving messages and +%% acknowledgements must update their own internal state accordingly +%% (the sending member must also do this in order to be able to +%% accommodate failures), and forwards messages on to their downstream +%% neighbours. +%% +%% +%% Implementation: It gets trickier +%% -------------------------------- +%% +%% Chain A -> B -> C -> D +%% +%% A publishes a message which B receives. A now dies. B and D will +%% detect the death of A, and will link up, thus the chain is now B -> +%% C -> D. B forwards A's message on to C, who forwards it to D, who +%% forwards it to B. Thus B is now responsible for A's messages - both +%% publications and acknowledgements that were in flight at the point +%% at which A died. Even worse is that this is transitive: after B +%% forwards A's message to C, B dies as well. Now C is not only +%% responsible for B's in-flight messages, but is also responsible for +%% A's in-flight messages. +%% +%% Lemma 1: A member can only determine which dead members they have +%% inherited responsibility for if there is a total ordering on the +%% conflicting additions and subtractions of members from the group. +%% +%% Consider the simultaneous death of B and addition of B' that +%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or +%% C is responsible for in-flight messages from B. It is easy to +%% ensure that at least one of them thinks they have inherited B, but +%% if we do not ensure that exactly one of them inherits B, then we +%% could have B' converting publishes to acks, which then will crash C +%% as C does not believe it has issued acks for those messages. +%% +%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E +%% becoming A -> C' -> E. Who has inherited which of B, C and D? +%% +%% However, for non-conflicting membership changes, only a partial +%% ordering is required. For example, A -> B -> C becoming A -> A' -> +%% B. The addition of A', between A and B can have no conflicts with +%% the death of C: it is clear that A has inherited C's messages. +%% +%% For ease of implementation, we adopt the simple solution, of +%% imposing a total order on all membership changes. +%% +%% On the death of a member, it is ensured the dead member's +%% neighbours become aware of the death, and the upstream neighbour +%% now sends to its new downstream neighbour its state, including the +%% messages pending acknowledgement. The downstream neighbour can then +%% use this to calculate which publishes and acknowledgements it has +%% missed out on, due to the death of its old upstream. Thus the +%% downstream can catch up, and continues the propagation of messages +%% through the group. 
+%% +%% Lemma 2: When a member is joining, it must synchronously +%% communicate with its upstream member in order to receive its +%% starting state atomically with its addition to the group. +%% +%% New members must start with the same state as their nearest +%% upstream neighbour. This ensures that it is not surprised by +%% acknowledgements they are sent, and that should their downstream +%% neighbour die, they are able to send the correct state to their new +%% downstream neighbour to ensure it can catch up. Thus in the +%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' -> +%% C, A' must start with the state of A, so that it can send C the +%% correct state when B dies, allowing C to detect any missed +%% messages. +%% +%% If A' starts by adding itself to the group membership, A could then +%% die, without A' having received the necessary state from A. This +%% would leave A' responsible for in-flight messages from A, but +%% having the least knowledge of all, of those messages. Thus A' must +%% start by synchronously calling A, which then immediately sends A' +%% back its state. A then adds A' to the group. If A dies at this +%% point then A' will be able to see this (as A' will fail to appear +%% in the group membership), and thus A' will ignore the state it +%% receives from A, and will simply repeat the process, trying to now +%% join downstream from some other member. This ensures that should +%% the upstream die as soon as the new member has been joined, the new +%% member is guaranteed to receive the correct state, allowing it to +%% correctly process messages inherited due to the death of its +%% upstream neighbour. +%% +%% The canonical definition of the group membership is held by a +%% distributed database. Whilst this allows the total ordering of +%% changes to be achieved, it is nevertheless undesirable to have to +%% query this database for the current view, upon receiving each +%% message. Instead, we wish for members to be able to cache a view of +%% the group membership, which then requires a cache invalidation +%% mechanism. Each member maintains its own view of the group +%% membership. Thus when the group's membership changes, members may +%% need to become aware of such changes in order to be able to +%% accurately process messages they receive. Because of the +%% requirement of a total ordering of conflicting membership changes, +%% it is not possible to use the guaranteed broadcast mechanism to +%% communicate these changes: to achieve the necessary ordering, it +%% would be necessary for such messages to be published by exactly one +%% member, which can not be guaranteed given that such a member could +%% die. +%% +%% The total ordering we enforce on membership changes gives rise to a +%% view version number: every change to the membership creates a +%% different view, and the total ordering permits a simple +%% monotonically increasing view version number. +%% +%% Lemma 3: If a message is sent from a member that holds view version +%% N, it can be correctly processed by any member receiving the +%% message with a view version >= N. +%% +%% Initially, let us suppose that each view contains the ordering of +%% every member that was ever part of the group. Dead members are +%% marked as such. Thus we have a ring of members, some of which are +%% dead, and are thus inherited by the nearest alive downstream +%% member. +%% +%% In the chain A -> B -> C, all three members initially have view +%% version 1, which reflects reality. 
B publishes a message, which is +%% forward by C to A. B now dies, which A notices very quickly. Thus A +%% updates the view, creating version 2. It now forwards B's +%% publication, sending that message to its new downstream neighbour, +%% C. This happens before C is aware of the death of B. C must become +%% aware of the view change before it interprets the message its +%% received, otherwise it will fail to learn of the death of B, and +%% thus will not realise it has inherited B's messages (and will +%% likely crash). +%% +%% Thus very simply, we have that each subsequent view contains more +%% information than the preceding view. +%% +%% However, to avoid the views growing indefinitely, we need to be +%% able to delete members which have died _and_ for which no messages +%% are in-flight. This requires that upon inheriting a dead member, we +%% know the last publication sent by the dead member (this is easy: we +%% inherit a member because we are the nearest downstream member which +%% implies that we know at least as much than everyone else about the +%% publications of the dead member), and we know the earliest message +%% for which the acknowledgement is still in flight. +%% +%% In the chain A -> B -> C, when B dies, A will send to C its state +%% (as C is the new downstream from A), allowing C to calculate which +%% messages it has missed out on (described above). At this point, C +%% also inherits B's messages. If that state from A also includes the +%% last message published by B for which an acknowledgement has been +%% seen, then C knows exactly which further acknowledgements it must +%% receive (also including issuing acknowledgements for publications +%% still in-flight that it receives), after which it is known there +%% are no more messages in flight for B, thus all evidence that B was +%% ever part of the group can be safely removed from the canonical +%% group membership. +%% +%% Thus, for every message that a member sends, it includes with that +%% message its view version. When a member receives a message it will +%% update its view from the canonical copy, should its view be older +%% than the view version included in the message it has received. +%% +%% The state held by each member therefore includes the messages from +%% each publisher pending acknowledgement, the last publication seen +%% from that publisher, and the last acknowledgement from that +%% publisher. In the case of the member's own publications or +%% inherited members, this last acknowledgement seen state indicates +%% the last acknowledgement retired, rather than sent. +%% +%% +%% Proof sketch +%% ------------ +%% +%% We need to prove that with the provided operational semantics, we +%% can never reach a state that is not well formed from a well-formed +%% starting state. +%% +%% Operational semantics (small step): straight-forward message +%% sending, process monitoring, state updates. +%% +%% Well formed state: dead members inherited by exactly one non-dead +%% member; for every entry in anyone's pending-acks, either (the +%% publication of the message is in-flight downstream from the member +%% and upstream from the publisher) or (the acknowledgement of the +%% message is in-flight downstream from the publisher and upstream +%% from the member). +%% +%% Proof by induction on the applicable operational semantics. 
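%% A concrete, made-up instance of the per-member state described above
%% (using the #member record defined further down; MsgC and MsgD are
%% placeholder message terms):
%%
%%     #member { pending_ack = queue:from_list([{3, MsgC}, {4, MsgD}]),
%%               last_pub    = 4,
%%               last_ack    = 2 }
%%
%% i.e. publications 3 and 4 from that publisher are still awaiting
%% acknowledgement, 4 is the latest publication seen from that publisher,
%% and acknowledgements have been seen up to publication 2.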
+%% +%% +%% Related work +%% ------------ +%% +%% The ring configuration and double traversal of messages around the +%% ring is similar (though developed independently) to the LCR +%% protocol by [Levy 2008]. However, LCR differs in several +%% ways. Firstly, by using vector clocks, it enforces a total order of +%% message delivery, which is unnecessary for our purposes. More +%% significantly, it is built on top of a "group communication system" +%% which performs the group management functions, taking +%% responsibility away from the protocol as to how to cope with safely +%% adding and removing members. When membership changes do occur, the +%% protocol stipulates that every member must perform communication +%% with every other member of the group, to ensure all outstanding +%% deliveries complete, before the entire group transitions to the new +%% view. This, in total, requires two sets of all-to-all synchronous +%% communications. +%% +%% This is not only rather inefficient, but also does not explain what +%% happens upon the failure of a member during this process. It does +%% though entirely avoid the need for inheritance of responsibility of +%% dead members that our protocol incorporates. +%% +%% In [Marandi et al 2010], a Paxos-based protocol is described. This +%% work explicitly focuses on the efficiency of communication. LCR +%% (and our protocol too) are more efficient, but at the cost of +%% higher latency. The Ring-Paxos protocol is itself built on top of +%% IP-multicast, which rules it out for many applications where +%% point-to-point communication is all that can be required. They also +%% have an excellent related work section which I really ought to +%% read... +%% +%% +%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008. +%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast +%% Protocol + + +-behaviour(gen_server2). + +-export([create_tables/0, start_link/3, leave/1, broadcast/2, + confirmed_broadcast/2, group_members/1]). + +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3, prioritise_info/2]). + +-export([behaviour_info/1]). + +-export([table_definitions/0]). + +-define(GROUP_TABLE, gm_group). +-define(HIBERNATE_AFTER_MIN, 1000). +-define(DESIRED_HIBERNATE, 10000). +-define(SETS, ordsets). +-define(DICT, orddict). + +-record(state, + { self, + left, + right, + group_name, + module, + view, + pub_count, + members_state, + callback_args, + confirms + }). + +-record(gm_group, { name, version, members }). + +-record(view_member, { id, aliases, left, right }). + +-record(member, { pending_ack, last_pub, last_ack }). + +-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group}, + {attributes, record_info(fields, gm_group)}]}). +-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}). + +-define(TAG, '$gm'). + +-ifdef(use_specs). + +-export_type([group_name/0]). + +-type(group_name() :: any()). + +-spec(create_tables/0 :: () -> 'ok'). +-spec(start_link/3 :: (group_name(), atom(), [any()]) -> + {'ok', pid()} | {'error', any()}). +-spec(leave/1 :: (pid()) -> 'ok'). +-spec(broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok'). +-spec(group_members/1 :: (pid()) -> [pid()]). + +-endif. + +behaviour_info(callbacks) -> + [ + %% Called when we've successfully joined the group. Supplied with + %% Args provided in start_link, plus current group members. 
+ {joined, 2}, + + %% Supplied with Args provided in start_link, the list of new + %% members and the list of members previously known to us that + %% have since died. Note that if a member joins and dies very + %% quickly, it's possible that we will never see that member + %% appear in either births or deaths. However we are guaranteed + %% that (1) we will see a member joining either in the births + %% here, or in the members passed to joined/1 before receiving + %% any messages from it; and (2) we will not see members die that + %% we have not seen born (or supplied in the members to + %% joined/1). + {members_changed, 3}, + + %% Supplied with Args provided in start_link, the sender, and the + %% message. This does get called for messages injected by this + %% member, however, in such cases, there is no special + %% significance of this call: it does not indicate that the + %% message has made it to any other members, let alone all other + %% members. + {handle_msg, 3}, + + %% Called on gm member termination as per rules in gen_server, + %% with the Args provided in start_link plus the termination + %% Reason. + {terminate, 2} + ]; +behaviour_info(_Other) -> + undefined. + +create_tables() -> + create_tables([?TABLE]). + +create_tables([]) -> + ok; +create_tables([{Table, Attributes} | Tables]) -> + case mnesia:create_table(Table, Attributes) of + {atomic, ok} -> create_tables(Tables); + {aborted, {already_exists, gm_group}} -> create_tables(Tables); + Err -> Err + end. + +table_definitions() -> + {Name, Attributes} = ?TABLE, + [{Name, [?TABLE_MATCH | Attributes]}]. + +start_link(GroupName, Module, Args) -> + gen_server2:start_link(?MODULE, [GroupName, Module, Args], []). + +leave(Server) -> + gen_server2:cast(Server, leave). + +broadcast(Server, Msg) -> + gen_server2:cast(Server, {broadcast, Msg}). + +confirmed_broadcast(Server, Msg) -> + gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity). + +group_members(Server) -> + gen_server2:call(Server, group_members, infinity). + + +init([GroupName, Module, Args]) -> + random:seed(now()), + gen_server2:cast(self(), join), + Self = self(), + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new() }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
+ + +handle_call({confirmed_broadcast, _Msg}, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call({confirmed_broadcast, Msg}, _From, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), ok, State}); + +handle_call({confirmed_broadcast, Msg}, From, State) -> + internal_broadcast(Msg, From, State); + +handle_call(group_members, _From, + State = #state { members_state = undefined }) -> + reply(not_joined, State); + +handle_call(group_members, _From, State = #state { view = View }) -> + reply(alive_view_members(View), State); + +handle_call({add_on_right, _NewMember}, _From, + State = #state { members_state = undefined }) -> + reply(not_ready, State); + +handle_call({add_on_right, NewMember}, _From, + State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + Group = record_new_member_in_group( + GroupName, Self, NewMember, + fun (Group1) -> + View1 = group_to_view(Group1), + ok = send_right(NewMember, View1, + {catchup, Self, prepare_members_state( + MembersState)}) + end), + View2 = group_to_view(Group), + State1 = check_neighbours(State #state { view = View2 }), + Result = callback_view_changed(Args, Module, View, View2), + handle_callback_result({Result, {ok, Group}, State1}). + + +handle_cast({?TAG, ReqVer, Msg}, + State = #state { view = View, + group_name = GroupName, + module = Module, + callback_args = Args }) -> + {Result, State1} = + case needs_view_update(ReqVer, View) of + true -> + View1 = group_to_view(read_group(GroupName)), + {callback_view_changed(Args, Module, View, View1), + check_neighbours(State #state { view = View1 })}; + false -> + {ok, State} + end, + handle_callback_result( + if_callback_success( + Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1)); + +handle_cast({broadcast, _Msg}, State = #state { members_state = undefined }) -> + noreply(State); + +handle_cast({broadcast, Msg}, + State = #state { self = Self, + right = {Self, undefined}, + module = Module, + callback_args = Args }) -> + handle_callback_result({Module:handle_msg(Args, Self, Msg), State}); + +handle_cast({broadcast, Msg}, State) -> + internal_broadcast(Msg, none, State); + +handle_cast(join, State = #state { self = Self, + group_name = GroupName, + members_state = undefined, + module = Module, + callback_args = Args }) -> + View = join_group(Self, GroupName), + MembersState = + case alive_view_members(View) of + [Self] -> blank_member_state(); + _ -> undefined + end, + State1 = check_neighbours(State #state { view = View, + members_state = MembersState }), + handle_callback_result( + {Module:joined(Args, all_known_members(View)), State1}); + +handle_cast(leave, State) -> + {stop, normal, State}. 
+ + +handle_info({'DOWN', MRef, process, _Pid, _Reason}, + State = #state { self = Self, + left = Left, + right = Right, + group_name = GroupName, + view = View, + module = Module, + callback_args = Args, + confirms = Confirms }) -> + Member = case {Left, Right} of + {{Member1, MRef}, _} -> Member1; + {_, {Member1, MRef}} -> Member1; + _ -> undefined + end, + case Member of + undefined -> + noreply(State); + _ -> + View1 = + group_to_view(record_dead_member_in_group(Member, GroupName)), + State1 = State #state { view = View1 }, + {Result, State2} = + case alive_view_members(View1) of + [Self] -> + maybe_erase_aliases( + State1 #state { + members_state = blank_member_state(), + confirms = purge_confirms(Confirms) }); + _ -> + %% here we won't be pointing out any deaths: + %% the concern is that there maybe births + %% which we'd otherwise miss. + {callback_view_changed(Args, Module, View, View1), + State1} + end, + handle_callback_result({Result, check_neighbours(State2)}) + end. + + +terminate(Reason, #state { module = Module, + callback_args = Args }) -> + Module:terminate(Args, Reason). + + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + + +prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; +prioritise_info(_ , _State) -> 0. + + +handle_msg(check_neighbours, State) -> + %% no-op - it's already been done by the calling handle_cast + {ok, State}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + right = {Right, _MRefR}, + view = View, + members_state = undefined }) -> + ok = send_right(Right, View, {catchup, Self, MembersStateLeft}), + MembersStateLeft1 = build_members_state(MembersStateLeft), + {ok, State #state { members_state = MembersStateLeft1 }}; + +handle_msg({catchup, Left, MembersStateLeft}, + State = #state { self = Self, + left = {Left, _MRefL}, + view = View, + members_state = MembersState }) + when MembersState =/= undefined -> + MembersStateLeft1 = build_members_state(MembersStateLeft), + AllMembers = lists:usort(?DICT:fetch_keys(MembersState) ++ + ?DICT:fetch_keys(MembersStateLeft1)), + {MembersState1, Activity} = + lists:foldl( + fun (Id, MembersStateActivity) -> + #member { pending_ack = PALeft, last_ack = LA } = + find_member_or_blank(Id, MembersStateLeft1), + with_member_acc( + fun (#member { pending_ack = PA } = Member, Activity1) -> + case is_member_alias(Id, Self, View) of + true -> + {_AcksInFlight, Pubs, _PA1} = + find_prefix_common_suffix(PALeft, PA), + {Member #member { last_ack = LA }, + activity_cons(Id, pubs_from_queue(Pubs), + [], Activity1)}; + false -> + {Acks, _Common, Pubs} = + find_prefix_common_suffix(PA, PALeft), + {Member, + activity_cons(Id, pubs_from_queue(Pubs), + acks_from_queue(Acks), + Activity1)} + end + end, Id, MembersStateActivity) + end, {MembersState, activity_nil()}, AllMembers), + handle_msg({activity, Left, activity_finalise(Activity)}, + State #state { members_state = MembersState1 }); + +handle_msg({catchup, _NotLeft, _MembersState}, State) -> + {ok, State}; + +handle_msg({activity, Left, Activity}, + State = #state { self = Self, + left = {Left, _MRefL}, + view = View, + members_state = MembersState, + confirms = Confirms }) + when MembersState =/= undefined -> + {MembersState1, {Confirms1, Activity1}} = + lists:foldl( + fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) -> + with_member_acc( + fun (Member = #member { pending_ack = PA, + last_pub = LP, + last_ack = LA }, + {Confirms2, Activity2}) -> + case is_member_alias(Id, Self, View) of + true 
-> + {ToAck, PA1} = + find_common(queue_from_pubs(Pubs), PA, + queue:new()), + LA1 = last_ack(Acks, LA), + AckNums = acks_from_queue(ToAck), + Confirms3 = maybe_confirm( + Self, Id, Confirms2, AckNums), + {Member #member { pending_ack = PA1, + last_ack = LA1 }, + {Confirms3, + activity_cons( + Id, [], AckNums, Activity2)}}; + false -> + PA1 = apply_acks(Acks, join_pubs(PA, Pubs)), + LA1 = last_ack(Acks, LA), + LP1 = last_pub(Pubs, LP), + {Member #member { pending_ack = PA1, + last_pub = LP1, + last_ack = LA1 }, + {Confirms2, + activity_cons(Id, Pubs, Acks, Activity2)}} + end + end, Id, MembersStateConfirmsActivity) + end, {MembersState, {Confirms, activity_nil()}}, Activity), + State1 = State #state { members_state = MembersState1, + confirms = Confirms1 }, + Activity3 = activity_finalise(Activity1), + {Result, State2} = maybe_erase_aliases(State1), + ok = maybe_send_activity(Activity3, State2), + if_callback_success( + Result, fun activity_true/3, fun activity_false/3, Activity3, State2); + +handle_msg({activity, _NotLeft, _Activity}, State) -> + {ok, State}. + + +noreply(State) -> + {noreply, State, hibernate}. + +reply(Reply, State) -> + {reply, Reply, State, hibernate}. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + members_state = MembersState, + module = Module, + confirms = Confirms, + callback_args = Args }) -> + PubMsg = {PubCount, Msg}, + Activity = activity_cons(Self, [PubMsg], [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = + with_member( + fun (Member = #member { pending_ack = PA }) -> + Member #member { pending_ack = queue:in(PubMsg, PA) } + end, Self, MembersState), + Confirms1 = case From of + none -> Confirms; + _ -> queue:in({PubCount, From}, Confirms) + end, + handle_callback_result({Module:handle_msg(Args, Self, Msg), + State #state { pub_count = PubCount + 1, + members_state = MembersState1, + confirms = Confirms1 }}). + + +%% --------------------------------------------------------------------------- +%% View construction and inspection +%% --------------------------------------------------------------------------- + +needs_view_update(ReqVer, {Ver, _View}) -> + Ver < ReqVer. + +view_version({Ver, _View}) -> + Ver. + +is_member_alive({dead, _Member}) -> false; +is_member_alive(_) -> true. + +is_member_alias(Self, Self, _View) -> + true; +is_member_alias(Member, Self, View) -> + ?SETS:is_element(Member, + ((fetch_view_member(Self, View)) #view_member.aliases)). + +dead_member_id({dead, Member}) -> Member. + +store_view_member(VMember = #view_member { id = Id }, {Ver, View}) -> + {Ver, ?DICT:store(Id, VMember, View)}. + +with_view_member(Fun, View, Id) -> + store_view_member(Fun(fetch_view_member(Id, View)), View). + +fetch_view_member(Id, {_Ver, View}) -> + ?DICT:fetch(Id, View). + +find_view_member(Id, {_Ver, View}) -> + ?DICT:find(Id, View). + +blank_view(Ver) -> + {Ver, ?DICT:new()}. + +alive_view_members({_Ver, View}) -> + ?DICT:fetch_keys(View). + +all_known_members({_Ver, View}) -> + ?DICT:fold( + fun (Member, #view_member { aliases = Aliases }, Acc) -> + ?SETS:to_list(Aliases) ++ [Member | Acc] + end, [], View). + +group_to_view(#gm_group { members = Members, version = Ver }) -> + Alive = lists:filter(fun is_member_alive/1, Members), + [_|_] = Alive, %% ASSERTION - can't have all dead members + add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members). 
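
%% group_to_view/1 above hands link_view/2 the alive members concatenated
%% three times: link_view/2 walks the list in [Left, Middle, Right]
%% triples, so the repetition lets the walk wrap around the ring and give
%% every member a left and right neighbour, including in one- and
%% two-member groups.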
+ +link_view([Left, Middle, Right | Rest], View) -> + case find_view_member(Middle, View) of + error -> + link_view( + [Middle, Right | Rest], + store_view_member(#view_member { id = Middle, + aliases = ?SETS:new(), + left = Left, + right = Right }, View)); + {ok, _} -> + View + end; +link_view(_, View) -> + View. + +add_aliases(View, Members) -> + Members1 = ensure_alive_suffix(Members), + {EmptyDeadSet, View1} = + lists:foldl( + fun (Member, {DeadAcc, ViewAcc}) -> + case is_member_alive(Member) of + true -> + {?SETS:new(), + with_view_member( + fun (VMember = + #view_member { aliases = Aliases }) -> + VMember #view_member { + aliases = ?SETS:union(Aliases, DeadAcc) } + end, ViewAcc, Member)}; + false -> + {?SETS:add_element(dead_member_id(Member), DeadAcc), + ViewAcc} + end + end, {?SETS:new(), View}, Members1), + 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION + View1. + +ensure_alive_suffix(Members) -> + queue:to_list(ensure_alive_suffix1(queue:from_list(Members))). + +ensure_alive_suffix1(MembersQ) -> + {{value, Member}, MembersQ1} = queue:out_r(MembersQ), + case is_member_alive(Member) of + true -> MembersQ; + false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1)) + end. + + +%% --------------------------------------------------------------------------- +%% View modification +%% --------------------------------------------------------------------------- + +join_group(Self, GroupName) -> + join_group(Self, GroupName, read_group(GroupName)). + +join_group(Self, GroupName, {error, not_found}) -> + join_group(Self, GroupName, prune_or_create_group(Self, GroupName)); +join_group(Self, _GroupName, #gm_group { members = [Self] } = Group) -> + group_to_view(Group); +join_group(Self, GroupName, #gm_group { members = Members } = Group) -> + case lists:member(Self, Members) of + true -> + group_to_view(Group); + false -> + case lists:filter(fun is_member_alive/1, Members) of + [] -> + join_group(Self, GroupName, + prune_or_create_group(Self, GroupName)); + Alive -> + Left = lists:nth(random:uniform(length(Alive)), Alive), + try + case gen_server2:call( + Left, {add_on_right, Self}, infinity) of + {ok, Group1} -> group_to_view(Group1); + not_ready -> join_group(Self, GroupName) + end + catch + exit:{R, _} + when R =:= noproc; R =:= normal; R =:= shutdown -> + join_group( + Self, GroupName, + record_dead_member_in_group(Left, GroupName)) + end + end + end. + +read_group(GroupName) -> + case mnesia:dirty_read(?GROUP_TABLE, GroupName) of + [] -> {error, not_found}; + [Group] -> Group + end. + +prune_or_create_group(Self, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> GroupNew = #gm_group { name = GroupName, + members = [Self], + version = 0 }, + case mnesia:read(?GROUP_TABLE, GroupName) of + [] -> + mnesia:write(GroupNew), + GroupNew; + [Group1 = #gm_group { members = Members }] -> + case lists:any(fun is_member_alive/1, Members) of + true -> Group1; + false -> mnesia:write(GroupNew), + GroupNew + end + end + end), + Group. 
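
%% The functions below that modify the group record each bump its version
%% field. group_to_view/1 carries that version into the cached view, and
%% needs_view_update/2 compares it with the version tagged onto each
%% ?TAG-wrapped message, so a member re-reads the group from Mnesia only
%% when it sees a newer version than the one it has cached.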
+ +record_dead_member_in_group(Member, GroupName) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case lists:splitwith( + fun (Member1) -> Member1 =/= Member end, Members) of + {_Members1, []} -> %% not found - already recorded dead + Group1; + {Members1, [Member | Members2]} -> + Members3 = Members1 ++ [{dead, Member} | Members2], + Group2 = Group1 #gm_group { members = Members3, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +record_new_member_in_group(GroupName, Left, NewMember, Fun) -> + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [#gm_group { members = Members, version = Ver } = Group1] = + mnesia:read(?GROUP_TABLE, GroupName), + {Prefix, [Left | Suffix]} = + lists:splitwith(fun (M) -> M =/= Left end, Members), + Members1 = Prefix ++ [Left, NewMember | Suffix], + Group2 = Group1 #gm_group { members = Members1, + version = Ver + 1 }, + ok = Fun(Group2), + mnesia:write(Group2), + Group2 + end), + Group. + +erase_members_in_group(Members, GroupName) -> + DeadMembers = [{dead, Id} || Id <- Members], + {atomic, Group} = + mnesia:sync_transaction( + fun () -> + [Group1 = #gm_group { members = [_|_] = Members1, + version = Ver }] = + mnesia:read(?GROUP_TABLE, GroupName), + case Members1 -- DeadMembers of + Members1 -> Group1; + Members2 -> Group2 = + Group1 #gm_group { members = Members2, + version = Ver + 1 }, + mnesia:write(Group2), + Group2 + end + end), + Group. + +maybe_erase_aliases(State = #state { self = Self, + group_name = GroupName, + view = View, + members_state = MembersState, + module = Module, + callback_args = Args }) -> + #view_member { aliases = Aliases } = fetch_view_member(Self, View), + {Erasable, MembersState1} + = ?SETS:fold( + fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> + #member { last_pub = LP, last_ack = LA } = + find_member_or_blank(Id, MembersState), + case can_erase_view_member(Self, Id, LA, LP) of + true -> {[Id | ErasableAcc], + erase_member(Id, MembersStateAcc)}; + false -> Acc + end + end, {[], MembersState}, Aliases), + State1 = State #state { members_state = MembersState1 }, + case Erasable of + [] -> {ok, State1}; + _ -> View1 = group_to_view( + erase_members_in_group(Erasable, GroupName)), + {callback_view_changed(Args, Module, View, View1), + State1 #state { view = View1 }} + end. + +can_erase_view_member(Self, Self, _LA, _LP) -> false; +can_erase_view_member(_Self, _Id, N, N) -> true; +can_erase_view_member(_Self, _Id, _LA, _LP) -> false. + + +%% --------------------------------------------------------------------------- +%% View monitoring and maintanence +%% --------------------------------------------------------------------------- + +ensure_neighbour(_Ver, Self, {Self, undefined}, Self) -> + {Self, undefined}; +ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) -> + ok = gen_server2:cast(RealNeighbour, {?TAG, Ver, check_neighbours}), + {RealNeighbour, maybe_monitor(RealNeighbour, Self)}; +ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) -> + {RealNeighbour, MRef}; +ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) -> + true = erlang:demonitor(MRef), + Msg = {?TAG, Ver, check_neighbours}, + ok = gen_server2:cast(RealNeighbour, Msg), + ok = case Neighbour of + Self -> ok; + _ -> gen_server2:cast(Neighbour, Msg) + end, + {Neighbour, maybe_monitor(Neighbour, Self)}. 
+ +maybe_monitor(Self, Self) -> + undefined; +maybe_monitor(Other, _Self) -> + erlang:monitor(process, Other). + +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View }) -> + #view_member { left = VLeft, right = VRight } + = fetch_view_member(Self, View), + Ver = view_version(View), + Left1 = ensure_neighbour(Ver, Self, Left, VLeft), + Right1 = ensure_neighbour(Ver, Self, Right, VRight), + State1 = State #state { left = Left1, right = Right1 }, + ok = maybe_send_catchup(Right, State1), + State1. + +maybe_send_catchup(Right, #state { right = Right }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Self, undefined} }) -> + ok; +maybe_send_catchup(_Right, #state { members_state = undefined }) -> + ok; +maybe_send_catchup(_Right, #state { self = Self, + right = {Right, _MRef}, + view = View, + members_state = MembersState }) -> + send_right(Right, View, + {catchup, Self, prepare_members_state(MembersState)}). + + +%% --------------------------------------------------------------------------- +%% Catch_up delta detection +%% --------------------------------------------------------------------------- + +find_prefix_common_suffix(A, B) -> + {Prefix, A1} = find_prefix(A, B, queue:new()), + {Common, Suffix} = find_common(A1, B, queue:new()), + {Prefix, Common, Suffix}. + +%% Returns the elements of A that occur before the first element of B, +%% plus the remainder of A. +find_prefix(A, B, Prefix) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, _A1}, {{value, Val}, _B1}} -> + {Prefix, A}; + {{empty, A1}, {{value, _A}, _B1}} -> + {Prefix, A1}; + {{{value, {NumA, _MsgA} = Val}, A1}, + {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB -> + find_prefix(A1, B, queue:in(Val, Prefix)); + {_, {empty, _B1}} -> + {A, Prefix} %% Prefix well be empty here + end. + +%% A should be a prefix of B. Returns the commonality plus the +%% remainder of B. +find_common(A, B, Common) -> + case {queue:out(A), queue:out(B)} of + {{{value, Val}, A1}, {{value, Val}, B1}} -> + find_common(A1, B1, queue:in(Val, Common)); + {{empty, _A}, _} -> + {Common, B} + end. + + +%% --------------------------------------------------------------------------- +%% Members helpers +%% --------------------------------------------------------------------------- + +with_member(Fun, Id, MembersState) -> + store_member( + Id, Fun(find_member_or_blank(Id, MembersState)), MembersState). + +with_member_acc(Fun, Id, {MembersState, Acc}) -> + {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc), + {store_member(Id, MemberState, MembersState), Acc1}. + +find_member_or_blank(Id, MembersState) -> + case ?DICT:find(Id, MembersState) of + {ok, Result} -> Result; + error -> blank_member() + end. + +erase_member(Id, MembersState) -> + ?DICT:erase(Id, MembersState). + +blank_member() -> + #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }. + +blank_member_state() -> + ?DICT:new(). + +store_member(Id, MemberState, MembersState) -> + ?DICT:store(Id, MemberState, MembersState). + +prepare_members_state(MembersState) -> + ?DICT:to_list(MembersState). + +build_members_state(MembersStateList) -> + ?DICT:from_list(MembersStateList). + + +%% --------------------------------------------------------------------------- +%% Activity assembly +%% --------------------------------------------------------------------------- + +activity_nil() -> + queue:new(). 
+ +activity_cons(_Id, [], [], Tail) -> + Tail; +activity_cons(Sender, Pubs, Acks, Tail) -> + queue:in({Sender, Pubs, Acks}, Tail). + +activity_finalise(Activity) -> + queue:to_list(Activity). + +maybe_send_activity([], _State) -> + ok; +maybe_send_activity(Activity, #state { self = Self, + right = {Right, _MRefR}, + view = View }) -> + send_right(Right, View, {activity, Self, Activity}). + +send_right(Right, View, Msg) -> + ok = gen_server2:cast(Right, {?TAG, view_version(View), Msg}). + +callback(Args, Module, Activity) -> + lists:foldl( + fun ({Id, Pubs, _Acks}, ok) -> + lists:foldl(fun ({_PubNum, Pub}, ok) -> + Module:handle_msg(Args, Id, Pub); + (_, Error) -> + Error + end, ok, Pubs); + (_, Error) -> + Error + end, ok, Activity). + +callback_view_changed(Args, Module, OldView, NewView) -> + OldMembers = all_known_members(OldView), + NewMembers = all_known_members(NewView), + Births = NewMembers -- OldMembers, + Deaths = OldMembers -- NewMembers, + case {Births, Deaths} of + {[], []} -> ok; + _ -> Module:members_changed(Args, Births, Deaths) + end. + +handle_callback_result({Result, State}) -> + if_callback_success( + Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State); +handle_callback_result({Result, Reply, State}) -> + if_callback_success( + Result, fun reply_true/3, fun reply_false/3, Reply, State). + +no_reply_true (_Result, _Undefined, State) -> noreply(State). +no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}. + +reply_true (_Result, Reply, State) -> reply(Reply, State). +reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}. + +handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State). +handle_msg_false(Result, _Msg, State) -> {Result, State}. + +activity_true(_Result, Activity, State = #state { module = Module, + callback_args = Args }) -> + {callback(Args, Module, Activity), State}. +activity_false(Result, _Activity, State) -> + {Result, State}. + +if_callback_success(ok, True, _False, Arg, State) -> + True(ok, Arg, State); +if_callback_success( + {become, Module, Args} = Result, True, _False, Arg, State) -> + True(Result, Arg, State #state { module = Module, + callback_args = Args }); +if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) -> + False(Result, Arg, State). + +maybe_confirm(_Self, _Id, Confirms, []) -> + Confirms; +maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) -> + case queue:out(Confirms) of + {empty, _Confirms} -> + Confirms; + {{value, {PubNum, From}}, Confirms1} -> + gen_server2:reply(From, ok), + maybe_confirm(Self, Self, Confirms1, PubNums); + {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum -> + maybe_confirm(Self, Self, Confirms, PubNums) + end; +maybe_confirm(_Self, _Id, Confirms, _PubNums) -> + Confirms. + +purge_confirms(Confirms) -> + [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)], + queue:new(). + + +%% --------------------------------------------------------------------------- +%% Msg transformation +%% --------------------------------------------------------------------------- + +acks_from_queue(Q) -> + [PubNum || {PubNum, _Msg} <- queue:to_list(Q)]. + +pubs_from_queue(Q) -> + queue:to_list(Q). + +queue_from_pubs(Pubs) -> + queue:from_list(Pubs). + +apply_acks([], Pubs) -> + Pubs; +apply_acks(List, Pubs) -> + {_, Pubs1} = queue:split(length(List), Pubs), + Pubs1. + +join_pubs(Q, []) -> Q; +join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)). 
+ +last_ack([], LA) -> + LA; +last_ack(List, LA) -> + LA1 = lists:last(List), + true = LA1 > LA, %% ASSERTION + LA1. + +last_pub([], LP) -> + LP; +last_pub(List, LP) -> + {PubNum, _Msg} = lists:last(List), + true = PubNum > LP, %% ASSERTION + PubNum. diff --git a/src/gm_test.erl b/src/gm_test.erl new file mode 100644 index 00000000..e8f28598 --- /dev/null +++ b/src/gm_test.erl @@ -0,0 +1,126 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% + +-module(gm_test). + +-export([test/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +get_state() -> + get(state). + +with_state(Fun) -> + put(state, Fun(get_state())). + +inc() -> + case 1 + get(count) of + 100000 -> Now = os:timestamp(), + Start = put(ts, Now), + Diff = timer:now_diff(Now, Start), + Rate = 100000 / (Diff / 1000000), + io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), + put(count, 0); + N -> put(count, N) + end. + +joined([], Members) -> + io:format("Joined ~p (~p members)~n", [self(), length(Members)]), + put(state, dict:from_list([{Member, empty} || Member <- Members])), + put(count, 0), + put(ts, os:timestamp()), + ok. + +members_changed([], Births, Deaths) -> + with_state( + fun (State) -> + State1 = + lists:foldl( + fun (Born, StateN) -> + false = dict:is_key(Born, StateN), + dict:store(Born, empty, StateN) + end, State, Births), + lists:foldl( + fun (Died, StateN) -> + true = dict:is_key(Died, StateN), + dict:store(Died, died, StateN) + end, State1, Deaths) + end), + ok. + +handle_msg([], From, {test_msg, Num}) -> + inc(), + with_state( + fun (State) -> + ok = case dict:find(From, State) of + {ok, died} -> + exit({{from, From}, + {received_posthumous_delivery, Num}}); + {ok, empty} -> ok; + {ok, Num} -> ok; + {ok, Num1} when Num < Num1 -> + exit({{from, From}, + {duplicate_delivery_of, Num1}, + {expecting, Num}}); + {ok, Num1} -> + exit({{from, From}, + {missing_delivery_of, Num}, + {received_early, Num1}}); + error -> + exit({{from, From}, + {received_premature_delivery, Num}}) + end, + dict:store(From, Num + 1, State) + end), + ok. + +terminate([], Reason) -> + io:format("Left ~p (~p)~n", [self(), Reason]), + ok. + +spawn_member() -> + spawn_link( + fun () -> + random:seed(now()), + %% start up delay of no more than 10 seconds + timer:sleep(random:uniform(10000)), + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), + Start = random:uniform(10000), + send_loop(Pid, Start, Start + random:uniform(10000)), + gm:leave(Pid), + spawn_more() + end). + +spawn_more() -> + [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. + +send_loop(_Pid, Target, Target) -> + ok; +send_loop(Pid, Count, Target) when Target > Count -> + case random:uniform(3) of + 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); + _ -> gm:broadcast(Pid, {test_msg, Count}) + end, + timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms + send_loop(Pid, Count + 1, Target). 
+ +test() -> + ok = gm:create_tables(), + spawn_member(), + spawn_member(). -- cgit v1.2.1 From a29958797d402243f9b36083f7d2f317eb9ed40f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 21 Jan 2011 12:10:41 +0000 Subject: bump year on copyrights --- include/gm_specs.hrl | 2 +- src/gm.erl | 2 +- src/gm_test.erl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 7f607755..987866db 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -ifdef(use_specs). diff --git a/src/gm.erl b/src/gm.erl index baf46471..8fea9196 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(gm). diff --git a/src/gm_test.erl b/src/gm_test.erl index e8f28598..e0a92a0c 100644 --- a/src/gm_test.erl +++ b/src/gm_test.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(gm_test). -- cgit v1.2.1 From d887a84c64321582266051b9a26ac9a9f1d1f6f7 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 24 Jan 2011 17:40:26 +0000 Subject: Treat sender-specified destinations as routing keys rather than queue names --- src/rabbit_exchange.erl | 13 +++++-------- src/rabbit_exchange_type_direct.erl | 10 +++++----- src/rabbit_exchange_type_fanout.erl | 10 ++-------- src/rabbit_exchange_type_topic.erl | 15 ++++++++++----- 4 files changed, 22 insertions(+), 26 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 24079d22..a94e57f8 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). -export([callback/3]). --export([header_routes/2]). +-export([header_routes/1]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -89,8 +89,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). --spec(header_routes/2 :: (rabbit_framing:amqp_table(), rabbit_types:vhost()) -> - [rabbit_types:r('queue')]). +-spec(header_routes/1 :: (rabbit_framing:amqp_table()) -> [binary()]). -endif. %%---------------------------------------------------------------------------- @@ -326,12 +325,10 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. -header_routes(undefined, _VHost) -> +header_routes(undefined) -> []; -header_routes(Headers, VHost) -> - [rabbit_misc:r(VHost, queue, RKey) || - RKey <- lists:flatten([routing_keys(Headers, Header) || - Header <- ?ROUTING_HEADERS])]. +header_routes(Headers) -> + lists:flatten([routing_keys(Headers, Header) || Header <- ?ROUTING_HEADERS]). 
routing_keys(HeadersTable, Key) -> case rabbit_misc:table_lookup(HeadersTable, Key) of diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index ade57451..97988381 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -51,13 +51,13 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. -route(#exchange{name = #resource{virtual_host = VHost} = Name}, +route(#exchange{name = Name}, #delivery{message = #basic_message{routing_key = RoutingKey, content = Content}}) -> - BindingRoutes = rabbit_router:match_routing_key(Name, RoutingKey), - HeaderRoutes = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers, VHost), - BindingRoutes ++ HeaderRoutes. + HeaderKeys = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers), + lists:flatten([rabbit_router:match_routing_key(Name, RKey) || + RKey <- [RoutingKey | HeaderKeys]]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index f3716141..5266dd87 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -31,7 +31,6 @@ -module(rabbit_exchange_type_fanout). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -51,13 +50,8 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. -route(#exchange{name = #resource{virtual_host = VHost} = Name}, - #delivery{message = #basic_message{content = Content}}) -> - BindingRoutes = rabbit_router:match_routing_key(Name, '_'), - HeaderRoutes = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers, VHost), - BindingRoutes ++ HeaderRoutes. - +route(#exchange{name = Name}, _Delivery) -> + rabbit_router:match_routing_key(Name, '_'). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2f0d47a7..8f3c0550 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,6 +30,7 @@ %% -module(rabbit_exchange_type_topic). +-include("rabbit_framing.hrl"). -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). @@ -59,11 +60,15 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey}}) -> - rabbit_router:match_bindings(Name, - fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RoutingKey) - end). + #delivery{message = #basic_message{routing_key = RoutingKey, + content = Content}}) -> + HeaderKeys = rabbit_exchange:header_routes( + (Content#content.properties)#'P_basic'.headers), + lists:flatten([rabbit_router:match_bindings( + Name, + fun (#binding{key = BindingKey}) -> + topic_matches(BindingKey, RKey) + end) || RKey <- [RoutingKey | HeaderKeys]]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). -- cgit v1.2.1 From 5f6b9f8881f55d67775df4db00cb513a037d649d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 26 Jan 2011 12:32:34 +0000 Subject: Change the new version format from: [{local, [...]}, {mnesia, [...]}]. to: [{rabbit, [{local, [...]}, {mnesia, [...]}]}]. This is to allow for future work allowing plugins to own upgrades (that can be ignored if the plugin is uninstalled), without having to change the format *again*. 
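
Purely as an illustration of the two layouts (the upgrade names and the SchemaFile variable are placeholders, not taken from this patch), the single term stored in the schema version file changes from the bare per-scope list to the same list wrapped under the application that owns it, which is what read_version/0 in the diff below unwraps:

    %% old format: one term, a list of {Scope, UpgradeNames} pairs
    [{local,  [some_local_upgrade]},
     {mnesia, [some_mnesia_upgrade, another_mnesia_upgrade]}].

    %% new format: the same list, keyed by the owning application
    [{rabbit, [{local,  [some_local_upgrade]},
               {mnesia, [some_mnesia_upgrade, another_mnesia_upgrade]}]}].

    %% reading it back then pattern-matches off the wrapper:
    {ok, [Wrapped]} = rabbit_misc:read_term_file(SchemaFile),
    [{rabbit, ScopeVersions}] = Wrapped.
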
--- src/rabbit_upgrade.erl | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b222845d..f279029a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -163,7 +163,8 @@ read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; - true -> {ok, V} + true -> [{rabbit, RV}] = V, + {ok, RV} end; {error, _} = Err -> Err end. @@ -175,13 +176,14 @@ read_version(Scope) -> end. write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), + ok = rabbit_misc:write_term_file(schema_filename(), + [[{rabbit, desired_version()}]]), ok. write_version(Scope) -> {ok, V0} = read_version(), V = orddict:store(Scope, desired_version(Scope), V0), - ok = rabbit_misc:write_term_file(schema_filename(), [V]), + ok = rabbit_misc:write_term_file(schema_filename(), [[{rabbit, V}]]), ok. desired_version() -> -- cgit v1.2.1 From cf8e92bad03ef05487c1e2aec557557a17977e0e Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 27 Jan 2011 18:11:44 +0000 Subject: cosmetic; using accumulator in trie_match --- src/rabbit_exchange_type_topic.erl | 49 ++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2e181f1d..fdababe7 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -80,34 +80,27 @@ assert_args_equivalence(X, Args) -> %%---------------------------------------------------------------------------- trie_match(X, Words) -> - trie_match(X, root, Words). - -trie_match(X, Node, []) -> - FinalRes = trie_bindings(X, Node), - HashRes = case trie_child(X, Node, "#") of - {ok, HashNode} -> trie_match(X, HashNode, []); - error -> [] - end, - FinalRes ++ HashRes; -trie_match(X, Node, [W | RestW] = Words) -> - ExactRes = case trie_child(X, Node, W) of - {ok, NextNode} -> trie_match(X, NextNode, RestW); - error -> [] - end, - StarRes = case trie_child(X, Node, "*") of - {ok, StarNode} -> trie_match(X, StarNode, RestW); - error -> [] - end, - HashRes = case trie_child(X, Node, "#") of - {ok, HashNode} -> trie_match_skip_any(X, HashNode, Words); - error -> [] - end, - ExactRes ++ StarRes ++ HashRes. - -trie_match_skip_any(X, Node, []) -> - trie_match(X, Node, []); -trie_match_skip_any(X, Node, [_ | RestW] = Words) -> - trie_match(X, Node, Words) ++ trie_match_skip_any(X, Node, RestW). + trie_match(X, root, Words, []). + +trie_match(X, Node, [], ResAcc) -> + ResAcc1 = trie_bindings(X, Node) ++ ResAcc, + trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [], ResAcc1); +trie_match(X, Node, [W | RestW] = Words, ResAcc) -> + ResAcc1 = trie_match_part(X, Node, W, fun trie_match/4, RestW, ResAcc), + ResAcc2 = trie_match_part(X, Node, "*", fun trie_match/4, RestW, ResAcc1), + trie_match_part(X, Node, "#", fun trie_match_skip_any/4, Words, ResAcc2). + +trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) -> + case trie_child(X, Node, Search) of + {ok, NextNode} -> MatchFun(X, NextNode, RestW, ResAcc); + error -> ResAcc + end. + +trie_match_skip_any(X, Node, [], ResAcc) -> + trie_match(X, Node, [], ResAcc); +trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) -> + ResAcc1 = trie_match(X, Node, Words, ResAcc), + trie_match_skip_any(X, Node, RestW, ResAcc1). 
follow_down_create(X, Words) -> case follow_down_last_node(X, Words) of -- cgit v1.2.1 From 18680bf95808fd395f7bdd955320920288e9af2c Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 27 Jan 2011 19:18:49 +0000 Subject: cosmetic --- src/rabbit_exchange_type_topic.erl | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index fdababe7..0beaa714 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -83,12 +83,14 @@ trie_match(X, Words) -> trie_match(X, root, Words, []). trie_match(X, Node, [], ResAcc) -> - ResAcc1 = trie_bindings(X, Node) ++ ResAcc, - trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [], ResAcc1); + trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [], + trie_bindings(X, Node) ++ ResAcc); trie_match(X, Node, [W | RestW] = Words, ResAcc) -> - ResAcc1 = trie_match_part(X, Node, W, fun trie_match/4, RestW, ResAcc), - ResAcc2 = trie_match_part(X, Node, "*", fun trie_match/4, RestW, ResAcc1), - trie_match_part(X, Node, "#", fun trie_match_skip_any/4, Words, ResAcc2). + lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) -> + trie_match_part(X, Node, WArg, MatchFun, RestWArg, Acc) + end, ResAcc, [{W, fun trie_match/4, RestW}, + {"*", fun trie_match/4, RestW}, + {"#", fun trie_match_skip_any/4, Words}]). trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) -> case trie_child(X, Node, Search) of @@ -99,8 +101,8 @@ trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) -> trie_match_skip_any(X, Node, [], ResAcc) -> trie_match(X, Node, [], ResAcc); trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) -> - ResAcc1 = trie_match(X, Node, Words, ResAcc), - trie_match_skip_any(X, Node, RestW, ResAcc1). + trie_match_skip_any(X, Node, RestW, + trie_match(X, Node, Words, ResAcc)). follow_down_create(X, Words) -> case follow_down_last_node(X, Words) of -- cgit v1.2.1 From 8304d8f8a8618b6e3aae73c18b4b2594d62fd67a Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 2 Feb 2011 13:41:24 +0000 Subject: Refactored sender-supplied routing keys --- include/rabbit.hrl | 2 +- src/rabbit_basic.erl | 69 +++++++++++++++++++++++++++---------- src/rabbit_channel.erl | 18 +--------- src/rabbit_exchange.erl | 60 +++++++------------------------- src/rabbit_exchange_type_direct.erl | 45 +++++++----------------- src/rabbit_exchange_type_topic.erl | 8 ++--- src/rabbit_router.erl | 59 +++++++------------------------ 7 files changed, 93 insertions(+), 168 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 5c5fad76..a8b326be 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -72,7 +72,7 @@ -record(listener, {node, protocol, host, ip_address, port}). -record(basic_message, {exchange_name, routing_key, content, guid, - is_persistent}). + is_persistent, route_list = []}). -record(ssl_socket, {tcp, ssl}). -record(delivery, {mandatory, immediate, txn, sender, message, diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 1ac39b65..c9d4808c 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -33,10 +33,9 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([publish/1, message/4, properties/1, delivery/5]). +-export([publish/1, message/3, message/4, properties/1, delivery/5]). -export([publish/4, publish/7]). -export([build_content/2, from_content/1]). --export([is_message_persistent/1]). 
%%---------------------------------------------------------------------------- @@ -56,8 +55,10 @@ rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> - (rabbit_types:message() | rabbit_types:error(any()))). + properties_input(), binary()) -> rabbit_types:message()). +-spec(message/3 :: + (rabbit_exchange:name(), rabbit_router:routing_key(), + rabbit_types:decoded_content()) -> rabbit_types:message()). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -71,9 +72,6 @@ rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> {rabbit_framing:amqp_property_record(), binary()}). --spec(is_message_persistent/1 :: (rabbit_types:decoded_content()) -> - (boolean() | - {'invalid', non_neg_integer()})). -endif. @@ -113,19 +111,33 @@ from_content(Content) -> rabbit_framing_amqp_0_9_1:method_id('basic.publish'), {Props, list_to_binary(lists:reverse(FragmentsRev))}. +%% This breaks the spec rule forbidding message modification +strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, + Key) when Headers =/= undefined -> + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + DecodedContent#content{ + properties_bin = none, + properties = Props#'P_basic'{headers = Headers0}} + end; +strip_header(DecodedContent, _Key) -> + DecodedContent. + +message(ExchangeName, RoutingKey, + #content{properties = Props} = DecodedContent) -> + #basic_message{ + exchange_name = ExchangeName, + routing_key = RoutingKey, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + route_list = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. + message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> Properties = properties(RawProperties), Content = build_content(Properties, BodyBin), - case is_message_persistent(Content) of - {invalid, Other} -> - {error, {invalid_delivery_mode, Other}}; - IsPersistent when is_boolean(IsPersistent) -> - #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKeyBin, - content = Content, - guid = rabbit_guid:guid(), - is_persistent = IsPersistent} - end. + message(ExchangeName, RoutingKeyBin, Content). properties(P = #'P_basic'{}) -> P; @@ -167,5 +179,26 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - Other -> {invalid, Other} + Other -> rabbit_log:warning("Unknown delivery mode ~p - " + "treating as 1, non-persistent~n", + [Other]), + false end. + +% Extract CC routes from headers +header_routes(undefined) -> + []; +header_routes(HeadersTable) -> + lists:flatten([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {longstr, Route} -> Route; + {array, Routes} -> rkeys(Routes, []); + _ -> [] + end || HeaderKey <- ?ROUTING_HEADERS]). + +rkeys([{longstr, Route} | Rest], RKeys) -> + rkeys(Rest, [Route | RKeys]); +rkeys([_ | Rest], RKeys) -> + rkeys(Rest, RKeys); +rkeys(_, RKeys) -> + RKeys. + diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5c900b0b..e818dd54 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -527,18 +527,13 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, %% certain to want to look at delivery-mode and priority. 
DecodedContent = rabbit_binary_parser:ensure_content_decoded(Content), check_user_id_header(DecodedContent#content.properties, State), - IsPersistent = is_message_persistent(DecodedContent), {MsgSeqNo, State1} = case ConfirmEnabled of false -> {undefined, State}; true -> SeqNo = State#ch.publish_seqno, {SeqNo, State#ch{publish_seqno = SeqNo + 1}} end, - Message = #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, - content = DecodedContent, - guid = rabbit_guid:guid(), - is_persistent = IsPersistent}, + Message = rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -1200,17 +1195,6 @@ notify_limiter(LimiterPid, Acked) -> Count -> rabbit_limiter:ack(LimiterPid, Count) end. -is_message_persistent(Content) -> - case rabbit_basic:is_message_persistent(Content) of - {invalid, Other} -> - rabbit_log:warning("Unknown delivery mode ~p - " - "treating as 1, non-persistent~n", - [Other]), - false; - IsPersistent when is_boolean(IsPersistent) -> - IsPersistent - end. - process_routing_result(unroutable, _, MsgSeqNo, Message, State) -> ok = basic_return(Message, State#ch.writer_pid, no_route), send_confirms([MsgSeqNo], State); diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a94e57f8..92259195 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -1,32 +1,17 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_exchange). 
@@ -36,7 +21,6 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). -export([callback/3]). --export([header_routes/1]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -89,7 +73,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). --spec(header_routes/1 :: (rabbit_framing:amqp_table()) -> [binary()]). + -endif. %%---------------------------------------------------------------------------- @@ -324,23 +308,3 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. - -header_routes(undefined) -> - []; -header_routes(Headers) -> - lists:flatten([routing_keys(Headers, Header) || Header <- ?ROUTING_HEADERS]). - -routing_keys(HeadersTable, Key) -> - case rabbit_misc:table_lookup(HeadersTable, Key) of - {longstr, Route} -> [Route]; - {array, Routes} -> rkeys(Routes, []); - _ -> [] - end. - -rkeys([{longstr, BinVal} | Rest], RKeys) -> - rkeys(Rest, [BinVal | RKeys]); -rkeys([{_, _} | Rest], RKeys) -> - rkeys(Rest, RKeys); -rkeys(_, RKeys) -> - RKeys. - diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 97988381..0baac1f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -1,37 +1,21 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. 
+%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_exchange_type_direct). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -behaviour(rabbit_exchange_type). @@ -52,12 +36,9 @@ description() -> {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey, - content = Content}}) -> - HeaderKeys = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers), + #delivery{message = #basic_message{route_list = Routes}}) -> lists:flatten([rabbit_router:match_routing_key(Name, RKey) || - RKey <- [RoutingKey | HeaderKeys]]). + RKey <- Routes]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 8f3c0550..97cf8ecf 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -30,7 +30,6 @@ %% -module(rabbit_exchange_type_topic). --include("rabbit_framing.hrl"). -include("rabbit.hrl"). -behaviour(rabbit_exchange_type). @@ -60,15 +59,12 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{routing_key = RoutingKey, - content = Content}}) -> - HeaderKeys = rabbit_exchange:header_routes( - (Content#content.properties)#'P_basic'.headers), + #delivery{message = #basic_message{route_list = Routes}}) -> lists:flatten([rabbit_router:match_bindings( Name, fun (#binding{key = BindingKey}) -> topic_matches(BindingKey, RKey) - end) || RKey <- [RoutingKey | HeaderKeys]]). + end) || RKey <- Routes]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 7f9b823e..692d2473 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -1,38 +1,22 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ %% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. %% -%% The Original Code is RabbitMQ. +%% The Original Code is RabbitMQ. %% -%% The Initial Developers of the Original Code are LShift Ltd, -%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd, -%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd -%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial -%% Technologies LLC, and Rabbit Technologies Ltd. -%% -%% Portions created by LShift Ltd are Copyright (C) 2007-2010 LShift -%% Ltd. 
Portions created by Cohesive Financial Technologies LLC are -%% Copyright (C) 2007-2010 Cohesive Financial Technologies -%% LLC. Portions created by Rabbit Technologies Ltd are Copyright -%% (C) 2007-2010 Rabbit Technologies Ltd. -%% -%% All Rights Reserved. -%% -%% Contributor(s): ______________________________________. +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. %% -module(rabbit_router). -include_lib("stdlib/include/qlc.hrl"). -include("rabbit.hrl"). --include("rabbit_framing.hrl"). -export([deliver/2, match_bindings/2, match_routing_key/2]). @@ -69,39 +53,22 @@ deliver(QNames, Delivery = #delivery{mandatory = false, %% is preserved. This scales much better than the non-immediate %% case below. QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), delegate:invoke_no_result( - QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, ModifiedDelivery) end), + QPids, fun (Pid) -> rabbit_amqqueue:deliver(Pid, Delivery) end), {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, immediate = Immediate}) -> QPids = lookup_qpids(QNames), - ModifiedDelivery = strip_header(Delivery, ?DELETED_HEADER), {Success, _} = delegate:invoke(QPids, fun (Pid) -> - rabbit_amqqueue:deliver(Pid, ModifiedDelivery) + rabbit_amqqueue:deliver(Pid, Delivery) end), {Routed, Handled} = lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). -%% This breaks the spec rule forbidding message modification -strip_header(Delivery = #delivery{message = Message = #basic_message{ - content = Content = #content{ - properties = Props = #'P_basic'{headers = Headers}}}}, - Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> Delivery; - Tuple -> Headers0 = lists:delete(Tuple, Headers), - Delivery#delivery{message = Message#basic_message{ - content = Content#content{ - properties_bin = none, - properties = Props#'P_basic'{headers = Headers0}}}} - end; -strip_header(Delivery, _Key) -> - Delivery. %% TODO: Maybe this should be handled by a cursor instead. %% TODO: This causes a full scan for each entry with the same source -- cgit v1.2.1 From c6e14cf23bcf5cebe1a9f2c3f44d1669d05cb961 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 4 Feb 2011 13:39:35 +0000 Subject: Treat basic_return immediate/mandatory differently --- src/rabbit_channel.erl | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index f9c3c286..ebd8b15c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1081,12 +1081,11 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, basic_return(#basic_message{exchange_name = ExchangeName, routing_key = RoutingKey, content = Content}, - State, Reason) -> - maybe_incr_stats([{ExchangeName, 1}], return, State), + WriterPid, Reason) -> {_Close, ReplyCode, ReplyText} = rabbit_framing_amqp_0_9_1:lookup_amqp_exception(Reason), ok = rabbit_writer:send_command( - State#ch.writer_pid, + WriterPid, #'basic.return'{reply_code = ReplyCode, reply_text = ReplyText, exchange = ExchangeName#resource.name, @@ -1240,11 +1239,17 @@ is_message_persistent(Content) -> IsPersistent end. 
-process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> +process_routing_result(unroutable, _, XName, MsgSeqNo, + Msg = #basic_message{exchange_name = ExchangeName}, + State) -> ok = basic_return(Msg, State#ch.writer_pid, no_route), + maybe_incr_stats([{ExchangeName, 1}], return_unroutable, State), record_confirm(MsgSeqNo, XName, State); -process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> +process_routing_result(not_delivered, _, XName, MsgSeqNo, + Msg = #basic_message{exchange_name = ExchangeName}, + State) -> ok = basic_return(Msg, State#ch.writer_pid, no_consumers), + maybe_incr_stats([{ExchangeName, 1}], return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From cd64ab0f9b9fe0689a74681fed4e65d7ce333b8f Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 4 Feb 2011 13:42:51 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ebd8b15c..87357b89 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1239,17 +1239,15 @@ is_message_persistent(Content) -> IsPersistent end. -process_routing_result(unroutable, _, XName, MsgSeqNo, - Msg = #basic_message{exchange_name = ExchangeName}, - State) -> +process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State#ch.writer_pid, no_route), - maybe_incr_stats([{ExchangeName, 1}], return_unroutable, State), + maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], + return_unroutable, State), record_confirm(MsgSeqNo, XName, State); -process_routing_result(not_delivered, _, XName, MsgSeqNo, - Msg = #basic_message{exchange_name = ExchangeName}, - State) -> +process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State#ch.writer_pid, no_consumers), - maybe_incr_stats([{ExchangeName, 1}], return_not_delivered, State), + maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], + return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From 5ecfe82f4886dee81d6de41e2811b6ab46c0297c Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 4 Feb 2011 14:18:19 +0000 Subject: Remove redundant try/catch from event notifier --- src/rabbit_event.erl | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 40ade4b7..40651d36 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -130,15 +130,8 @@ notify_if(true, Type, Props) -> notify(Type, Props); notify_if(false, _Type, _Props) -> ok. notify(Type, Props) -> - try - %% TODO: switch to os:timestamp() when we drop support for - %% Erlang/OTP < R13B01 - gen_event:notify(rabbit_event, #event{type = Type, - props = Props, - timestamp = now()}) - catch error:badarg -> - %% badarg means rabbit_event is no longer registered. We never - %% unregister it so the great likelihood is that we're shutting - %% down the broker but some events were backed up. Ignore it. - ok - end. + %% TODO: switch to os:timestamp() when we drop support for + %% Erlang/OTP < R13B01 + gen_event:notify(rabbit_event, #event{type = Type, + props = Props, + timestamp = now()}). 
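
The try/catch being removed guarded against gen_event:notify/2 exiting with badarg once the rabbit_event manager is no longer registered, which, as its comment noted, only really happens while the broker is shutting down with events still queued. For reference only, a minimal sketch of how a caller could tolerate that case itself if it were ever needed again; safe_notify/2 is a hypothetical helper, not part of this patch:

    %% Hypothetical guard: drop the event if the manager is gone,
    %% otherwise delegate to the normal rabbit_event:notify/2.
    safe_notify(Type, Props) ->
        case whereis(rabbit_event) of
            undefined -> ok;
            _Pid      -> rabbit_event:notify(Type, Props)
        end.
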
-- cgit v1.2.1 From 631e455ea25ea4202568c40ceb615c8cdeb94a16 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Mon, 7 Feb 2011 14:23:01 +0000 Subject: fixing binding recovery --- src/rabbit_exchange_type_topic.erl | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 0beaa714..c1741b30 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -46,7 +46,12 @@ route(#exchange{name = X}, validate(_X) -> ok. create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. + +recover(_Exchange, Bs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> + lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) + end). delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), @@ -55,10 +60,8 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _Exchange, #binding{source = X, key = K, destination = D}) -> - FinalNode = follow_down_create(X, split_topic_key(K)), - trie_add_binding(X, FinalNode, D), - ok; +add_binding(true, _Exchange, Binding) -> + internal_add_binding(Binding); add_binding(false, _Exchange, _Binding) -> ok. @@ -79,6 +82,11 @@ assert_args_equivalence(X, Args) -> %%---------------------------------------------------------------------------- +internal_add_binding(#binding{source = X, key = K, destination = D}) -> + FinalNode = follow_down_create(X, split_topic_key(K)), + trie_add_binding(X, FinalNode, D), + ok. + trie_match(X, Words) -> trie_match(X, root, Words, []). -- cgit v1.2.1 From caea05b408f238891410107431b3b0994e02ae66 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Feb 2011 16:05:02 +0000 Subject: Just depend on "erlang". --- packaging/debs/Debian/debian/control | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control index 02da0cc6..b01d38b3 100644 --- a/packaging/debs/Debian/debian/control +++ b/packaging/debs/Debian/debian/control @@ -7,10 +7,7 @@ Standards-Version: 3.8.0 Package: rabbitmq-server Architecture: all -# erlang-inets is not a strict dependency, but it's needed to allow -# the installation of plugins that use mochiweb. Ideally it would be a -# "Recommends" instead, but gdebi does not install those. -Depends: erlang-base (>= 1:12.b.3) | erlang-base-hipe (>= 1:12.b.3), erlang-ssl | erlang-nox (<< 1:13.b-dfsg1-1), erlang-os-mon | erlang-nox (<< 1:13.b-dfsg1-1), erlang-mnesia | erlang-nox (<< 1:13.b-dfsg1-1), erlang-inets | erlang-nox (<< 1:13.b-dfsg1-1), adduser, logrotate, ${misc:Depends} +Depends: erlang (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} Description: An AMQP server written in Erlang RabbitMQ is an implementation of AMQP, the emerging standard for high performance enterprise messaging. 
The RabbitMQ server is a robust and -- cgit v1.2.1 -- cgit v1.2.1 From 1fcc077284fd29b909ac1bf57b3b1916cbe4927e Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 14:45:08 -0800 Subject: Updated file_handle_cache.erl --- src/file_handle_cache.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 1e1f37cb..b5b07eca 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -908,10 +908,10 @@ handle_cast({transfer, FromPid, ToPid}, State) -> ok = track_client(ToPid, State#fhc_state.clients), {noreply, process_pending( update_counts(obtain, ToPid, +1, - update_counts(obtain, FromPid, -1, State)))}; + update_counts(obtain, FromPid, -1, State)))}. -handle_cast(check_counts, State) -> - {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })}. +handle_info(check_counts, State) -> + {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })}; handle_info({'DOWN', _MRef, process, Pid, _Reason}, State = #fhc_state { elders = Elders, @@ -1104,9 +1104,9 @@ reduce(State = #fhc_state { open_pending = OpenPending, end end, case TRef of - undefined -> {ok, TRef1} = timer:apply_after( - ?FILE_HANDLES_CHECK_INTERVAL, - gen_server, cast, [?SERVER, check_counts]), + undefined -> TRef1 = erlang:send_after( + ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER, + check_counts), State #fhc_state { timer_ref = TRef1 }; _ -> State end. -- cgit v1.2.1 From 09790a240a33d8a464a0cfa410e71c08052c8d27 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 14:58:10 -0800 Subject: Updated rabbit_msg_store. --- src/rabbit_msg_store.erl | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index e9c356e1..5fec2659 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -23,11 +23,12 @@ client_ref/1, close_all_indicated/1, write/3, read/2, contains/2, remove/2, release/2, sync/3]). --export([sync/1, set_maximum_since_use/2, - has_readers/2, combine_files/3, delete_file/2]). %% internal +-export([set_maximum_since_use/2, has_readers/2, combine_files/3, + delete_file/2]). %% internal --export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3, prioritise_call/3, prioritise_cast/2, + prioritise_info/2]). %%---------------------------------------------------------------------------- @@ -154,7 +155,6 @@ -spec(sync/3 :: ([rabbit_guid:guid()], fun (() -> any()), client_msstate()) -> 'ok'). --spec(sync/1 :: (server()) -> 'ok'). -spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok'). -spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()). -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> @@ -455,9 +455,6 @@ release([], _CState) -> ok; release(Guids, CState) -> server_cast(CState, {release, Guids}). sync(Guids, K, CState) -> server_cast(CState, {sync, Guids, K}). -sync(Server) -> - gen_server2:cast(Server, sync). - set_maximum_since_use(Server, Age) -> gen_server2:cast(Server, {set_maximum_since_use, Age}). 
@@ -698,7 +695,6 @@ prioritise_call(Msg, _From, _State) -> prioritise_cast(Msg, _State) -> case Msg of - sync -> 8; {combine_files, _Source, _Destination, _Reclaimed} -> 8; {delete_file, _File, _Reclaimed} -> 8; {set_maximum_since_use, _Age} -> 8; @@ -706,6 +702,12 @@ prioritise_cast(Msg, _State) -> _ -> 0 end. +prioritise_info(Msg, _State) -> + case Msg of + sync -> 8; + _ -> 0 + end. + handle_call(successfully_recovered_state, _From, State) -> reply(State #msstate.successfully_recovered, State); @@ -797,9 +799,6 @@ handle_cast({sync, Guids, K}, true -> noreply(State #msstate { on_sync = [K | Syncs] }) end; -handle_cast(sync, State) -> - noreply(internal_sync(State)); - handle_cast({combine_files, Source, Destination, Reclaimed}, State = #msstate { sum_file_size = SumFileSize, file_handles_ets = FileHandlesEts, @@ -823,6 +822,9 @@ handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), noreply(State). +handle_info(sync, State) -> + noreply(internal_sync(State)); + handle_info(timeout, State) -> noreply(internal_sync(State)); @@ -888,13 +890,13 @@ next_state(State = #msstate { on_sync = Syncs, end. start_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> - {ok, TRef} = timer:apply_after(?SYNC_INTERVAL, ?MODULE, sync, [self()]), + TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync), State #msstate { sync_timer_ref = TRef }. stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> - {ok, cancel} = timer:cancel(TRef), + erlang:cancel_timer(TRef), State #msstate { sync_timer_ref = undefined }. internal_sync(State = #msstate { current_file_handle = CurHdl, -- cgit v1.2.1 From 5e2c80888e3d4660a4dbc23640a2465980051020 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 15:06:16 -0800 Subject: Updated supervisor2. --- src/file_handle_cache.erl | 2 +- src/rabbit_msg_store.erl | 6 +++--- src/supervisor2.erl | 29 ++++++++++++----------------- 3 files changed, 16 insertions(+), 21 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index b5b07eca..27d24b5d 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1079,7 +1079,7 @@ reduce(State = #fhc_state { open_pending = OpenPending, timer_ref = TRef }) -> Now = now(), {CStates, Sum, ClientCount} = - dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = Accs) -> + dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = A [#cstate { pending_closes = PendingCloses, opened = Opened, blocked = Blocked } = CState] = diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 5fec2659..e0f05275 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -24,11 +24,11 @@ write/3, read/2, contains/2, remove/2, release/2, sync/3]). -export([set_maximum_since_use/2, has_readers/2, combine_files/3, - delete_file/2]). %% internal + delete_file/2]). %% internal -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_call/3, prioritise_cast/2, - prioritise_info/2]). + code_change/3, prioritise_call/3, prioritise_cast/2, + prioritise_info/2]). %%---------------------------------------------------------------------------- diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 1a240856..d1537f26 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -73,7 +73,6 @@ %% Internal exports -export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3]). 
-export([handle_cast/2]). --export([delayed_restart/2]). -define(DICT, dict). @@ -154,9 +153,6 @@ check_childspecs(ChildSpecs) when is_list(ChildSpecs) -> end; check_childspecs(X) -> {error, {badarg, X}}. -delayed_restart(Supervisor, RestartDetails) -> - gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). - %%% --------------------------------------------------- %%% %%% Initialize the supervisor. @@ -352,12 +348,19 @@ handle_call(which_children, _From, State) -> State#state.children), {reply, Resp, State}. +%%% Hopefully cause a function-clause as there is no API function +%%% that utilizes cast. +handle_cast(null, State) -> + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + []), + + {noreply, State}. -handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) +handle_info({delayed_restart, {RestartType, Reason, Child}}, State) when ?is_simple(State) -> {ok, NState} = do_restart(RestartType, Reason, Child, State), {noreply, NState}; -handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> +handle_info({delayed_restart, {RestartType, Reason, Child}}, State) -> case get_child(Child#child.name, State) of {value, Child1} -> {ok, NState} = do_restart(RestartType, Reason, Child1, State), @@ -366,14 +369,6 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> {noreply, State} end; -%%% Hopefully cause a function-clause as there is no API function -%%% that utilizes cast. -handle_cast(null, State) -> - error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", - []), - - {noreply, State}. - %% %% Take care of terminated children. %% @@ -536,9 +531,9 @@ do_restart({RestartType, Delay}, Reason, Child, State) -> {ok, NState} -> {ok, NState}; {terminate, NState} -> - {ok, _TRef} = timer:apply_after( - trunc(Delay*1000), ?MODULE, delayed_restart, - [self(), {{RestartType, Delay}, Reason, Child}]), + _TRef = erlang:send_after(trunc(Delay*1000), self(), + {delayed_restart, + {{RestartType, Delay}, Reason, Child}}), {ok, state_del_child(Child, NState)} end; do_restart(permanent, Reason, Child, State) -> -- cgit v1.2.1 From 985dac102d3e4bc2b155fec579a7986c2eebc023 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 15:20:57 -0800 Subject: Updated rabbit_event and rabbit_reader. Removed accidental tabs from file_handle_cache. --- src/file_handle_cache.erl | 2 +- src/rabbit_event.erl | 17 ++++++++--------- src/rabbit_reader.erl | 12 ++---------- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 27d24b5d..b5b07eca 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1079,7 +1079,7 @@ reduce(State = #fhc_state { open_pending = OpenPending, timer_ref = TRef }) -> Now = now(), {CStates, Sum, ClientCount} = - dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = A + dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = Accs) -> [#cstate { pending_closes = PendingCloses, opened = Opened, blocked = Blocked } = CState] = diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 40ade4b7..90b2f66c 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -19,7 +19,7 @@ -include("rabbit.hrl"). -export([start_link/0]). --export([init_stats_timer/0, ensure_stats_timer/2, stop_stats_timer/1]). +-export([init_stats_timer/0, ensure_stats_timer/3, stop_stats_timer/1]). -export([reset_stats_timer/1]). -export([stats_level/1, if_enabled/2]). -export([notify/2, notify_if/3]). 
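The supervisor2 hunks above show a second consequence of the same switch: timer:apply_after/4 can only fire an exported MFA, which is why supervisor2 previously had to export delayed_restart/2 purely for the timer's benefit, while erlang:send_after/3 just posts a term to a pid and needs no public helper (and, in the same spirit, ensure_stats_timer now takes a pid and a message rather than a fun). A small sketch of the two styles, with invented names:

    -module(delayed_restart_demo).

    -export([schedule_old/2, schedule_new/2]).
    -export([delayed_restart/2]).  %% only needed by the timer in the old style

    %% Old style: the timer applies an MFA, so the callback must be exported.
    schedule_old(DelayMs, Details) ->
        {ok, _TRef} = timer:apply_after(DelayMs, ?MODULE, delayed_restart,
                                        [self(), Details]),
        ok.

    delayed_restart(Pid, Details) ->
        Pid ! {delayed_restart, Details}.

    %% New style: the timer delivers the term itself; the owning process
    %% handles {delayed_restart, Details} in handle_info/2 (or a receive).
    schedule_new(DelayMs, Details) ->
        _TRef = erlang:send_after(DelayMs, self(), {delayed_restart, Details}),
        ok.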
@@ -56,7 +56,7 @@ -spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()). -spec(init_stats_timer/0 :: () -> state()). --spec(ensure_stats_timer/2 :: (state(), timer_fun()) -> state()). +-spec(ensure_stats_timer/3 :: (state(), pid(), term()) -> state()). -spec(stop_stats_timer/1 :: (state()) -> state()). -spec(reset_stats_timer/1 :: (state()) -> state()). -spec(stats_level/1 :: (state()) -> level()). @@ -79,7 +79,7 @@ start_link() -> %% if_enabled(internal_emit_stats) - so we immediately send something %% %% On wakeup: -%% ensure_stats_timer(Timer, emit_stats) +%% ensure_stats_timer(Timer, Pid, emit_stats) %% (Note we can't emit stats immediately, the timer may have fired 1ms ago.) %% %% emit_stats: @@ -97,13 +97,12 @@ init_stats_timer() -> {ok, StatsLevel} = application:get_env(rabbit, collect_statistics), #state{level = StatsLevel, timer = undefined}. -ensure_stats_timer(State = #state{level = none}, _Fun) -> +ensure_stats_timer(State = #state{level = none}, _Pid, _Msg) -> State; -ensure_stats_timer(State = #state{timer = undefined}, Fun) -> - {ok, TRef} = timer:apply_after(?STATS_INTERVAL, - erlang, apply, [Fun, []]), +ensure_stats_timer(State = #state{timer = undefined}, Pid, Msg) -> + TRef = erlang:send_after(?STATS_INTERVAL, Pid, Msg), State#state{timer = TRef}; -ensure_stats_timer(State, _Fun) -> +ensure_stats_timer(State, _Pid, _Msg) -> State. stop_stats_timer(State = #state{level = none}) -> @@ -111,7 +110,7 @@ stop_stats_timer(State = #state{level = none}) -> stop_stats_timer(State = #state{timer = undefined}) -> State; stop_stats_timer(State = #state{timer = TRef}) -> - {ok, cancel} = timer:cancel(TRef), + erlang:cancel_timer(TRef), State#state{timer = undefined}. reset_stats_timer(State) -> diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 1781469a..34883058 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -28,8 +28,6 @@ -export([process_channel_frame/5]). %% used by erlang-client --export([emit_stats/1]). - -define(HANDSHAKE_TIMEOUT, 10). -define(NORMAL_TIMEOUT, 3). -define(CLOSING_TIMEOUT, 1). @@ -157,7 +155,6 @@ -spec(info_keys/0 :: () -> rabbit_types:info_keys()). -spec(info/1 :: (pid()) -> rabbit_types:infos()). -spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()). --spec(emit_stats/1 :: (pid()) -> 'ok'). -spec(shutdown/2 :: (pid(), string()) -> 'ok'). -spec(conserve_memory/2 :: (pid(), boolean()) -> 'ok'). -spec(server_properties/0 :: () -> rabbit_framing:amqp_table()). @@ -212,9 +209,6 @@ info(Pid, Items) -> {error, Error} -> throw(Error) end. -emit_stats(Pid) -> - gen_server:cast(Pid, emit_stats). - conserve_memory(Pid, Conserve) -> Pid ! {conserve_memory, Conserve}, ok. @@ -376,7 +370,7 @@ mainloop(Deb, State = #v1{parent = Parent, sock= Sock, recv_ref = Ref}) -> catch Error -> {error, Error} end), mainloop(Deb, State); - {'$gen_cast', emit_stats} -> + emit_stats -> State1 = internal_emit_stats(State), mainloop(Deb, State1); {system, From, Request} -> @@ -671,10 +665,8 @@ refuse_connection(Sock, Exception) -> ensure_stats_timer(State = #v1{stats_timer = StatsTimer, connection_state = running}) -> - Self = self(), State#v1{stats_timer = rabbit_event:ensure_stats_timer( - StatsTimer, - fun() -> emit_stats(Self) end)}; + StatsTimer, self(), emit_stats)}; ensure_stats_timer(State) -> State. -- cgit v1.2.1 From e8344dabf107d074162aa786af539b9d60c09378 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 15:43:11 -0800 Subject: Updated rabbit_amqqueue_process. Ready to test. 
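rabbit_reader runs a hand-rolled receive loop rather than a gen_server, which is why its mainloop used to match the cast wrapper {'$gen_cast', emit_stats} and can now match the bare emit_stats atom: the stats timer posts the term directly. A toy loop, purely illustrative, showing the two shapes side by side:

    -module(reader_loop_demo).
    -export([start/0, loop/1]).

    start() ->
        spawn(?MODULE, loop, [0]).

    loop(Count) ->
        receive
            emit_stats ->            %% bare term, e.g. from erlang:send_after/3
                io:format("stats tick ~p~n", [Count]),
                loop(Count + 1);
            {'$gen_cast', Msg} ->    %% how gen_server:cast/2 wraps its payload
                io:format("cast: ~p~n", [Msg]),
                loop(Count);
            stop ->
                ok
        end.

Something like Pid = reader_loop_demo:start(), erlang:send_after(1000, Pid, emit_stats) exercises the first clause.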
--- src/rabbit_amqqueue_process.erl | 66 ++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7c7e28fe..f707e3e1 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -223,10 +223,8 @@ stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> State#q{sync_timer_ref = undefined}. ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> - {ok, TRef} = timer:apply_after( - ?RAM_DURATION_UPDATE_INTERVAL, - rabbit_amqqueue, update_ram_duration, - [self()]), + TRef = erlang:send_after( + ?RAM_DURATION_UPDATE_INTERVAL, self(), update_ram_duration), State#q{rate_timer_ref = TRef}; ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; @@ -238,13 +236,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> - {ok, cancel} = timer:cancel(TRef), + erlang:cancel_timer(TRef), State#q{rate_timer_ref = undefined}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> - {ok, cancel} = timer:cancel(TRef), + erlang:cancel_timer(TRef), State#q{expiry_timer_ref = undefined}. %% We wish to expire only when there are no consumers *and* the expiry @@ -256,18 +254,16 @@ ensure_expiry_timer(State = #q{expires = Expires}) -> case is_unused(State) of true -> NewState = stop_expiry_timer(State), - {ok, TRef} = timer:apply_after( - Expires, rabbit_amqqueue, maybe_expire, [self()]), + TRef = erlang:send_after(Expires, self(), maybe_expire), NewState#q{expiry_timer_ref = TRef}; false -> State end. -ensure_stats_timer(State = #q{stats_timer = StatsTimer, - q = Q}) -> - State#q{stats_timer = rabbit_event:ensure_stats_timer( - StatsTimer, - fun() -> rabbit_amqqueue:emit_stats(Q) end)}. +ensure_stats_timer(State = #q { stats_timer = StatsTimer, + q = #amqqueue { pid = QPid }}) -> + State #q { stats_timer = rabbit_event:ensure_stats_timer( + StatsTimer, QPid, emit_stats) }. assert_invariant(#q{active_consumers = AC, backing_queue = BQ, backing_queue_state = BQS}) -> @@ -677,8 +673,7 @@ ensure_ttl_timer(State = #q{backing_queue = BQ, when TTL =/= undefined -> case BQ:is_empty(BQS) of true -> State; - false -> TRef = timer:apply_after(TTL, rabbit_amqqueue, drop_expired, - [self()]), + false -> TRef = erlang:send_after(TTL, self(), drop_expired), State#q{ttl_timer_ref = TRef} end; ensure_ttl_timer(State) -> @@ -764,13 +759,9 @@ prioritise_call(Msg, _From, _State) -> prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; delete_immediately -> 8; {set_ram_duration_target, _Duration} -> 8; {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; {ack, _Txn, _MsgIds, _ChPid} -> 7; {reject, _MsgIds, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; @@ -782,7 +773,14 @@ prioritise_cast(Msg, _State) -> prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, #q{q = #amqqueue{exclusive_owner = DownPid}}) -> 8; -prioritise_info(_Msg, _State) -> 0. +prioritise_info(Msg, _State) -> + case Msg of + update_ram_duration -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + _ -> 0 + end. 
handle_call({init, Recover}, From, State = #q{q = #amqqueue{exclusive_owner = none}}) -> @@ -1085,15 +1083,6 @@ handle_cast({flush, ChPid}, State) -> ok = rabbit_channel:flushed(ChPid, self()), noreply(State); -handle_cast(update_ram_duration, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - noreply(State#q{rate_timer_ref = just_measured, - backing_queue_state = BQS2}); - handle_cast({set_ram_duration_target, Duration}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> BQS1 = BQ:set_ram_duration_target(Duration, BQS), @@ -1101,24 +1090,33 @@ handle_cast({set_ram_duration_target, Duration}, handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), - noreply(State); + noreply(State). + +handle_info(update_ram_duration, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + noreply(State#q{rate_timer_ref = just_measured, + backing_queue_state = BQS2}); -handle_cast(maybe_expire, State) -> +handle_info(maybe_expire, State) -> case is_unused(State) of true -> ?LOGDEBUG("Queue lease expired for ~p~n", [State#q.q]), {stop, normal, State}; false -> noreply(ensure_expiry_timer(State)) end; -handle_cast(drop_expired, State) -> +handle_info(drop_expired, State) -> noreply(drop_expired_messages(State#q{ttl_timer_ref = undefined})); -handle_cast(emit_stats, State = #q{stats_timer = StatsTimer}) -> +handle_info(emit_stats, State = #q{stats_timer = StatsTimer}) -> %% Do not invoke noreply as it would see no timer and create a new one. emit_stats(State), State1 = State#q{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}, assert_invariant(State1), - {noreply, State1, hibernate}. + {noreply, State1, hibernate}; handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State = #q{q = #amqqueue{exclusive_owner = DownPid}}) -> -- cgit v1.2.1 From ff50bb3990b48d58b533ffe86bc8a7faa18b9982 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Mon, 7 Feb 2011 17:22:40 -0800 Subject: Dies in rabbit_channel:handle_info with emit_stats msg. Will fix typo in the morning. Also dies in com.rabbitmq.client.test.server.EffectVisibilityCrossNodeTest.testEffectVisibility test, but so does the default branch. 
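Because the timer-driven messages above moved from handle_cast/2 to handle_info/2, their elevated priorities had to move from prioritise_cast/2 to prioritise_info/2 as well. These are optional callbacks of RabbitMQ's gen_server2 (not stock OTP gen_server), consulted to reorder the mailbox, with larger numbers served sooner and 0 as the default. A schematic fragment of such a module, showing only the two callbacks, with message names taken from the patch:

    prioritise_cast(Msg, _State) ->
        case Msg of
            delete_immediately            -> 8;
            {set_maximum_since_use, _Age} -> 8;
            _                             -> 0
        end.

    prioritise_info(Msg, _State) ->
        case Msg of
            update_ram_duration -> 8;   %% timer messages keep their urgency
            maybe_expire        -> 8;
            drop_expired        -> 8;
            emit_stats          -> 7;
            _                   -> 0
        end.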
--- Makefile | 2 ++ src/rabbit_amqqueue_process.erl | 10 +++++----- src/rabbit_channel.erl | 3 +-- src/rabbit_event.erl | 2 +- src/rabbit_msg_store.erl | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 51b998f4..301e83e4 100644 --- a/Makefile +++ b/Makefile @@ -110,6 +110,8 @@ $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_c dialyze: $(BEAM_TARGETS) $(BASIC_PLT) dialyzer --plt $(BASIC_PLT) --no_native \ + -Wunmatched_returns -Werror_handling -Wbehaviours \ + -Wunderspecs \ -Wrace_conditions $(BEAM_TARGETS) # rabbit.plt is used by rabbitmq-erlang-client's dialyze make target diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index f707e3e1..2999aab2 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -224,7 +224,7 @@ stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> TRef = erlang:send_after( - ?RAM_DURATION_UPDATE_INTERVAL, self(), update_ram_duration), + ?RAM_DURATION_UPDATE_INTERVAL, self(), update_ram_duration), State#q{rate_timer_ref = TRef}; ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; @@ -236,13 +236,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> - erlang:cancel_timer(TRef), + _ = erlang:cancel_timer(TRef), State#q{rate_timer_ref = undefined}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> - erlang:cancel_timer(TRef), + _ = erlang:cancel_timer(TRef), State#q{expiry_timer_ref = undefined}. %% We wish to expire only when there are no consumers *and* the expiry @@ -261,9 +261,9 @@ ensure_expiry_timer(State = #q{expires = Expires}) -> end. ensure_stats_timer(State = #q { stats_timer = StatsTimer, - q = #amqqueue { pid = QPid }}) -> + q = #amqqueue { pid = QPid }}) -> State #q { stats_timer = rabbit_event:ensure_stats_timer( - StatsTimer, QPid, emit_stats) }. + StatsTimer, QPid, emit_stats) }. assert_invariant(#q{active_consumers = AC, backing_queue = BQ, backing_queue_state = BQS}) -> diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index a82e5eff..1c4c1631 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -342,8 +342,7 @@ next_state(Mask, State) -> ensure_stats_timer(State = #ch{stats_timer = StatsTimer}) -> ChPid = self(), State#ch{stats_timer = rabbit_event:ensure_stats_timer( - StatsTimer, - fun() -> emit_stats(ChPid) end)}. + StatsTimer, ChPid, emit_stats)}. return_ok(State, true, _Msg) -> {noreply, State}; return_ok(State, false, Msg) -> {reply, Msg, State}. diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 90b2f66c..f4ee279b 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -110,7 +110,7 @@ stop_stats_timer(State = #state{level = none}) -> stop_stats_timer(State = #state{timer = undefined}) -> State; stop_stats_timer(State = #state{timer = TRef}) -> - erlang:cancel_timer(TRef), + _ = erlang:cancel_timer(TRef), State#state{timer = undefined}. 
reset_stats_timer(State) -> diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index e0f05275..75ca0b8b 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -896,7 +896,7 @@ start_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> - erlang:cancel_timer(TRef), + _ = erlang:cancel_timer(TRef), State #msstate { sync_timer_ref = undefined }. internal_sync(State = #msstate { current_file_handle = CurHdl, -- cgit v1.2.1 From a3a7b9126c914e6fb5c1ef62baeedbbc3ef7650b Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Tue, 8 Feb 2011 16:21:17 -0800 Subject: All tests pass. --- src/rabbit_amqqueue.erl | 5 ----- src/rabbit_channel.erl | 11 ++++++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index a6da551d..dc3f249a 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -27,7 +27,6 @@ check_exclusive_access/2, with_exclusive_access_or_die/3, stat/1, deliver/2, requeue/3, ack/4, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). --export([emit_stats/1]). -export([consumers/1, consumers_all/1]). -export([basic_get/3, basic_consume/7, basic_cancel/4]). -export([notify_sent/2, unblock/2, flush_all/2]). @@ -97,7 +96,6 @@ -spec(stat/1 :: (rabbit_types:amqqueue()) -> {'ok', non_neg_integer(), non_neg_integer()}). --spec(emit_stats/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(delete_immediately/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(delete/3 :: (rabbit_types:amqqueue(), 'false', 'false') @@ -358,9 +356,6 @@ consumers_all(VHostPath) -> stat(#amqqueue{pid = QPid}) -> delegate_call(QPid, stat, infinity). -emit_stats(#amqqueue{pid = QPid}) -> - delegate_cast(QPid, emit_stats). - delete_immediately(#amqqueue{ pid = QPid }) -> gen_server2:cast(QPid, delete_immediately). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 1c4c1631..eb80e437 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -265,11 +265,6 @@ handle_cast({deliver, ConsumerTag, AckRequired, end, State), noreply(State1#ch{next_tag = DeliveryTag + 1}); -handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> - internal_emit_stats(State), - noreply([ensure_stats_timer], - State#ch{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); - handle_cast({confirm, MsgSeqNos, From}, State) -> State1 = #ch{confirmed = C} = confirm(MsgSeqNos, From, State), noreply([send_confirms], State1, case C of [] -> hibernate; _ -> 0 end). @@ -277,6 +272,12 @@ handle_cast({confirm, MsgSeqNos, From}, State) -> handle_info(timeout, State) -> noreply(State); +handle_info(emit_stats, State = #ch{stats_timer = StatsTimer}) -> + internal_emit_stats(State), + noreply([ensure_stats_timer], + State#ch{ + stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); + handle_info({'DOWN', _MRef, process, QPid, Reason}, State = #ch{unconfirmed = UC}) -> %% TODO: this does a complete scan and partial rebuild of the -- cgit v1.2.1 From c6248e4437c04032e9231d265181e2e87d615ef5 Mon Sep 17 00:00:00 2001 From: John DeTreville Date: Tue, 8 Feb 2011 17:23:53 -0800 Subject: Sorry, accidentally shipped some changes to Makefile that I use for testing. 
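The Makefile change this commit reverts had briefly enabled extra dialyzer warnings, including -Wunmatched_returns, which is what the _ = erlang:cancel_timer(TRef) matches added in the previous commits are for: erlang:cancel_timer/1 returns the remaining time in milliseconds, or false if the timer has already fired, and that result was being dropped silently. A small sketch of handling the return value explicitly, including an optional flush for a timeout message that may already be sitting in the mailbox (module and function names are illustrative):

    -module(timer_cancel_demo).
    -export([cancel/2]).

    %% Cancel a timer created with erlang:send_after/3. If it already fired,
    %% its message may be in our mailbox, so drain it rather than let it leak.
    cancel(TRef, Msg) ->
        case erlang:cancel_timer(TRef) of
            false ->
                receive Msg -> ok after 0 -> ok end;
            _MillisecondsLeft ->
                ok
        end.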
--- Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Makefile b/Makefile index 301e83e4..51b998f4 100644 --- a/Makefile +++ b/Makefile @@ -110,8 +110,6 @@ $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_c dialyze: $(BEAM_TARGETS) $(BASIC_PLT) dialyzer --plt $(BASIC_PLT) --no_native \ - -Wunmatched_returns -Werror_handling -Wbehaviours \ - -Wunderspecs \ -Wrace_conditions $(BEAM_TARGETS) # rabbit.plt is used by rabbitmq-erlang-client's dialyze make target -- cgit v1.2.1 From f315a8348de87819dc3b1fbd1987f94c176e8e01 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 9 Feb 2011 12:01:21 +0000 Subject: Sender-selected destinations - qa feedback --- include/rabbit.hrl | 4 ++-- src/rabbit_basic.erl | 38 +++++++++++++------------------------ src/rabbit_channel.erl | 6 +++--- src/rabbit_exchange_type_direct.erl | 6 +++--- src/rabbit_exchange_type_topic.erl | 12 ++++++------ src/rabbit_types.erl | 2 +- 6 files changed, 28 insertions(+), 40 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 0b6280d1..7bcf021e 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -56,8 +56,8 @@ -record(listener, {node, protocol, host, ip_address, port}). --record(basic_message, {exchange_name, routing_key, content, guid, - is_persistent, route_list = []}). +-record(basic_message, {exchange_name, routing_keys = [], content, guid, + is_persistent}). -record(ssl_socket, {tcp, ssl}). -record(delivery, {mandatory, immediate, txn, sender, message, diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index a144124f..f1348d33 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -97,15 +97,15 @@ from_content(Content) -> {Props, list_to_binary(lists:reverse(FragmentsRev))}. %% This breaks the spec rule forbidding message modification -strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, - Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), +strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} + = DecodedContent, Key) when Headers =/= undefined -> + rabbit_binary_generator:clear_encoded_content( + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), DecodedContent#content{ - properties_bin = none, properties = Props#'P_basic'{headers = Headers0}} - end; + end); strip_header(DecodedContent, _Key) -> DecodedContent. @@ -113,11 +113,10 @@ message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> #basic_message{ exchange_name = ExchangeName, - routing_key = RoutingKey, content = strip_header(DecodedContent, ?DELETED_HEADER), guid = rabbit_guid:guid(), is_persistent = is_message_persistent(DecodedContent), - route_list = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. + routing_keys = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> Properties = properties(RawProperties), @@ -164,26 +163,15 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - Other -> rabbit_log:warning("Unknown delivery mode ~p - " - "treating as 1, non-persistent~n", - [Other]), - false + _ -> false end. 
% Extract CC routes from headers header_routes(undefined) -> []; header_routes(HeadersTable) -> - lists:flatten([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {longstr, Route} -> Route; - {array, Routes} -> rkeys(Routes, []); - _ -> [] - end || HeaderKey <- ?ROUTING_HEADERS]). - -rkeys([{longstr, Route} | Rest], RKeys) -> - rkeys(Rest, [Route | RKeys]); -rkeys([_ | Rest], RKeys) -> - rkeys(Rest, RKeys); -rkeys(_, RKeys) -> - RKeys. + lists:append([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + _ -> [] + end || HeaderKey <- ?ROUTING_HEADERS]). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index be232bd2..16a3911d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -243,7 +243,7 @@ handle_cast({command, Msg}, State = #ch{writer_pid = WriterPid}) -> handle_cast({deliver, ConsumerTag, AckRequired, Msg = {_QName, QPid, _MsgId, Redelivered, #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}}}, State = #ch{writer_pid = WriterPid, next_tag = DeliveryTag}) -> @@ -609,7 +609,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, #basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}}} -> State1 = lock_message(not(NoAck), ack_record(DeliveryTag, none, Msg), @@ -1074,7 +1074,7 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, end. basic_return(#basic_message{exchange_name = ExchangeName, - routing_key = RoutingKey, + routing_keys = [RoutingKey | _CcRoutes], content = Content}, WriterPid, Reason) -> {_Close, ReplyCode, ReplyText} = diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 0baac1f8..82776c4a 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -36,9 +36,9 @@ description() -> {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{route_list = Routes}}) -> - lists:flatten([rabbit_router:match_routing_key(Name, RKey) || - RKey <- Routes]). + #delivery{message = #basic_message{routing_keys = Routes}}) -> + lists:append([rabbit_router:match_routing_key(Name, RKey) || + RKey <- Routes]). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index beee4974..27251d12 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -44,12 +44,12 @@ description() -> {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, - #delivery{message = #basic_message{route_list = Routes}}) -> - lists:flatten([rabbit_router:match_bindings( - Name, - fun (#binding{key = BindingKey}) -> - topic_matches(BindingKey, RKey) - end) || RKey <- Routes]). + #delivery{message = #basic_message{routing_keys = Routes}}) -> + lists:append([rabbit_router:match_bindings( + Name, + fun (#binding{key = BindingKey}) -> + topic_matches(BindingKey, RKey) + end) || RKey <- Routes]). split_topic_key(Key) -> string:tokens(binary_to_list(Key), "."). 
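With routing_keys now a list, the CC and BCC headers contribute extra keys and each exchange type simply appends the matches for every key, as the direct and topic hunks above show. A simplified, stand-alone version of the header extraction; the real code goes through rabbit_misc:table_lookup/2, while here the headers are assumed to be plain {Name, Type, Value} tuples:

    -module(cc_routes_demo).
    -export([header_routes/2]).

    header_routes(undefined, _Keys) ->
        [];
    header_routes(Headers, Keys) ->
        lists:append(
          [case lists:keyfind(Key, 1, Headers) of
               {Key, array, Routes} -> [R || {longstr, R} <- Routes];
               _                    -> []
           end || Key <- Keys]).

For example, header_routes([{<<"CC">>, array, [{longstr, <<"audit">>}]}], [<<"CC">>, <<"BCC">>]) evaluates to [<<"audit">>].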
diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 3dbe740f..ab2300c0 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -64,7 +64,7 @@ -type(content() :: undecoded_content() | decoded_content()). -type(basic_message() :: #basic_message{exchange_name :: rabbit_exchange:name(), - routing_key :: rabbit_router:routing_key(), + routing_keys :: [rabbit_router:routing_key()], content :: content(), guid :: rabbit_guid:guid(), is_persistent :: boolean()}). -- cgit v1.2.1 From 340ae1fdefe6b7b9558292ca1e7ff43ecde06ac4 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 9 Feb 2011 12:16:20 +0000 Subject: Only clear encoded content when necessary --- src/rabbit_basic.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index f1348d33..5ea145d4 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -99,13 +99,13 @@ from_content(Content) -> %% This breaks the spec rule forbidding message modification strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, Key) when Headers =/= undefined -> - rabbit_binary_generator:clear_encoded_content( - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), + case lists:keyfind(Key, 1, Headers) of + false -> DecodedContent; + Tuple -> Headers0 = lists:delete(Tuple, Headers), + rabbit_binary_generator:clear_encoded_content( DecodedContent#content{ - properties = Props#'P_basic'{headers = Headers0}} - end); + properties = Props#'P_basic'{headers = Headers0}}) + end; strip_header(DecodedContent, _Key) -> DecodedContent. -- cgit v1.2.1 From d23a7dd42b8ee932dffdc9f9cb0c286bb3cb4982 Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Thu, 10 Feb 2011 16:32:03 +0000 Subject: Added code to raise or clear alarm "file_descriptor_limit" when transitions between being able to obtain file descriptors (e.g. for sockets) or not, and vice versa, occur. Method adjust_alarm contains the logic to set/clear alarm based on previous and new state. --- src/file_handle_cache.erl | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 1e1f37cb..a1b8efc1 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -869,13 +869,13 @@ handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, {noreply, reduce(State #fhc_state { obtain_pending = pending_in(Item, Pending) })}; false -> - {noreply, run_pending_item(Item, State)} + {noreply, adjust_alarm(State, run_pending_item(Item, State))} end; handle_call({set_limit, Limit}, _From, State) -> {reply, ok, maybe_reduce( - process_pending(State #fhc_state { + adjust_alarm(State, process_pending(State #fhc_state { limit = Limit, - obtain_limit = obtain_limit(Limit) }))}; + obtain_limit = obtain_limit(Limit) })))}; handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) -> {reply, Limit, State}. 
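The adjust_alarm calls threaded through the obtain and set_limit paths above (and through the close and DOWN paths later in this patch) make the alarm edge-triggered: it is raised only when the obtain count crosses the limit and cleared only when it drops back under, so steady traffic on either side of the threshold does not spam the alarm handler. A reduced sketch of that shape; it assumes the SASL alarm_handler is running and takes the counts and limit directly for brevity:

    -module(fd_alarm_demo).
    -export([adjust_alarm/3]).

    limit_reached(_Count, infinity) -> false;
    limit_reached(Count, Limit)     -> Count >= Limit.

    adjust_alarm(OldCount, NewCount, Limit) ->
        case {limit_reached(OldCount, Limit), limit_reached(NewCount, Limit)} of
            {false, true} -> alarm_handler:set_alarm({file_descriptor_limit, []});
            {true, false} -> alarm_handler:clear_alarm(file_descriptor_limit);
            _             -> ok
        end.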
@@ -900,9 +900,9 @@ handle_cast({close, Pid, EldestUnusedSince}, _ -> dict:store(Pid, EldestUnusedSince, Elders) end, ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}), - {noreply, process_pending( + {noreply, adjust_alarm(State, process_pending( update_counts(open, Pid, -1, - State #fhc_state { elders = Elders1 }))}; + State #fhc_state { elders = Elders1 })))}; handle_cast({transfer, FromPid, ToPid}, State) -> ok = track_client(ToPid, State#fhc_state.clients), @@ -924,13 +924,15 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason}, ets:lookup(Clients, Pid), true = ets:delete(Clients, Pid), FilterFun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end, - {noreply, process_pending( - State #fhc_state { - open_count = OpenCount - Opened, - open_pending = filter_pending(FilterFun, OpenPending), - obtain_count = ObtainCount - Obtained, - obtain_pending = filter_pending(FilterFun, ObtainPending), - elders = dict:erase(Pid, Elders) })}. + {noreply, adjust_alarm( + State, + process_pending( + State #fhc_state { + open_count = OpenCount - Opened, + open_pending = filter_pending(FilterFun, OpenPending), + obtain_count = ObtainCount - Obtained, + obtain_pending = filter_pending(FilterFun, ObtainPending), + elders = dict:erase(Pid, Elders) }))}. terminate(_Reason, State = #fhc_state { clients = Clients }) -> ets:delete(Clients), @@ -990,6 +992,18 @@ obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of OLimit -> OLimit end. +obtain_limit_reached(#fhc_state { obtain_limit = Limit, + obtain_count = Count}) -> + Limit =/= infinity andalso Count >= Limit. + +adjust_alarm(OldState, NewState) -> + case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of + {false, true} -> alarm_handler:set_alarm({file_descriptor_limit, []}); + {true, false} -> alarm_handler:clear_alarm(file_descriptor_limit); + _ -> ok + end, + NewState. + requested({_Kind, _Pid, Requested, _From}) -> Requested. -- cgit v1.2.1 From 2344574821599928d2af80a6026c92278f266a7e Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Fri, 11 Feb 2011 12:07:40 +0000 Subject: Refactored handle_call({obtain, ...}, ...) 
into single headed function --- src/file_handle_cache.erl | 71 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index a1b8efc1..9bb7abd7 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -849,33 +849,60 @@ handle_call({open, Pid, Requested, EldestUnusedSince}, From, false -> {noreply, run_pending_item(Item, State1)} end; -handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, - obtain_count = Count, - obtain_pending = Pending, - clients = Clients }) - when Limit =/= infinity andalso Count >= Limit -> - ok = track_client(Pid, Clients), - true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), - Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, - {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; +%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, +%% obtain_count = Count, +%% obtain_pending = Pending, +%% clients = Clients }) +%% when Limit =/= infinity andalso Count >= Limit -> +%% ok = track_client(Pid, Clients), +%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), +%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, +%% {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; + +%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, +%% obtain_pending = Pending, +%% clients = Clients }) -> +%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, +%% ok = track_client(Pid, Clients), +%% case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of +%% true -> +%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), +%% {noreply, reduce(State #fhc_state { +%% obtain_pending = pending_in(Item, Pending) })}; +%% false -> +%% {noreply, adjust_alarm(State, run_pending_item(Item, State))} +%% end; + handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, obtain_pending = Pending, clients = Clients }) -> - Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, ok = track_client(Pid, Clients), - case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of - true -> - true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), - {noreply, reduce(State #fhc_state { - obtain_pending = pending_in(Item, Pending) })}; - false -> - {noreply, adjust_alarm(State, run_pending_item(Item, State))} - end; + Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, + Enqueue = fun () -> + true = ets:update_element(Clients, Pid, + {#cstate.blocked, true}), + State #fhc_state { + obtain_pending = pending_in(Item, Pending) } + end, + {noreply, + case obtain_limit_reached(State) of + true -> Enqueue(); + false -> case needs_reduce(State #fhc_state { + obtain_count = Count + 1 }) of + true -> reduce(Enqueue()); + false -> adjust_alarm( + State, run_pending_item(Item, State)) + end + end}; + handle_call({set_limit, Limit}, _From, State) -> - {reply, ok, maybe_reduce( - adjust_alarm(State, process_pending(State #fhc_state { - limit = Limit, - obtain_limit = obtain_limit(Limit) })))}; + {reply, ok, adjust_alarm( + State, maybe_reduce( + process_pending( + State #fhc_state { + limit = Limit, + obtain_limit = obtain_limit(Limit) })))}; + handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) -> {reply, Limit, State}. 
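The refactor above folds the two obtain clauses into a single head and captures the shared block-and-queue path in a local Enqueue fun, so the logic for parking a request is written once even though two branches need it. A stripped-down sketch of the same shape, with invented stand-ins for the real checks:

    -module(obtain_refactor_demo).
    -export([obtain/3]).

    obtain(Count, Limit, Pending) ->
        Enqueue = fun () -> {blocked, queue:in(obtain_request, Pending)} end,
        case limit_reached(Count, Limit) of
            true  -> Enqueue();
            false -> case near_limit(Count + 1, Limit) of
                         true  -> Enqueue();
                         false -> {granted, Pending}
                     end
        end.

    limit_reached(_Count, infinity) -> false;
    limit_reached(Count, Limit)     -> Count >= Limit.

    %% invented stand-in for the real "should we start reclaiming handles" check
    near_limit(_Count, infinity) -> false;
    near_limit(Count, Limit)     -> Count >= Limit - (Limit div 10).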
-- cgit v1.2.1 From f70f04a1cbbc3fb1ba01bd2c4ed28ff764955fb1 Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Fri, 11 Feb 2011 12:11:43 +0000 Subject: remove commented out code --- src/file_handle_cache.erl | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 9bb7abd7..921b1211 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -849,30 +849,6 @@ handle_call({open, Pid, Requested, EldestUnusedSince}, From, false -> {noreply, run_pending_item(Item, State1)} end; -%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_limit = Limit, -%% obtain_count = Count, -%% obtain_pending = Pending, -%% clients = Clients }) -%% when Limit =/= infinity andalso Count >= Limit -> -%% ok = track_client(Pid, Clients), -%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), -%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, -%% {noreply, State #fhc_state { obtain_pending = pending_in(Item, Pending) }}; - -%% handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, -%% obtain_pending = Pending, -%% clients = Clients }) -> -%% Item = #pending { kind = obtain, pid = Pid, requested = 1, from = From }, -%% ok = track_client(Pid, Clients), -%% case needs_reduce(State #fhc_state { obtain_count = Count + 1 }) of -%% true -> -%% true = ets:update_element(Clients, Pid, {#cstate.blocked, true}), -%% {noreply, reduce(State #fhc_state { -%% obtain_pending = pending_in(Item, Pending) })}; -%% false -> -%% {noreply, adjust_alarm(State, run_pending_item(Item, State))} -%% end; - handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count, obtain_pending = Pending, clients = Clients }) -> -- cgit v1.2.1 From 99ac15fbc28d60adc0d38899a5a7f770530ca466 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 12:51:50 +0000 Subject: Upgrade messages --- include/rabbit_backing_queue_spec.hrl | 3 ++ src/rabbit.erl | 1 + src/rabbit_msg_file.erl | 68 ++++++++++++++++++++--------------- src/rabbit_msg_store.erl | 62 ++++++++++++++++++++++++++++++++ src/rabbit_upgrade_functions.erl | 19 ++++++++++ src/rabbit_variable_queue.erl | 15 +++++++- 6 files changed, 138 insertions(+), 30 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index accb2c0e..52ffd413 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,3 +65,6 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). +-spec(transform_storage/1 :: + (fun ((binary()) -> (rabbit_types:ok_or_error2(any(), any())))) -> + non_neg_integer()). diff --git a/src/rabbit.erl b/src/rabbit.erl index c6661d39..9e241e80 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -38,6 +38,7 @@ -rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, + {requires, file_handle_cache}, {enables, external_infrastructure}]}). -rabbit_boot_step({file_handle_cache, diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index cfea4982..ad87ee16 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2]). +-export([append/3, read/2, scan/2, scan/3]). 
%%---------------------------------------------------------------------------- @@ -48,6 +48,9 @@ -spec(scan/2 :: (io_device(), file_size()) -> {'ok', [{rabbit_guid:guid(), msg_size(), position()}], position()}). +-spec(scan/3 :: (io_device(), file_size(), + fun ((rabbit_guid:guid(), msg_size(), position(), binary()) -> any())) -> + {'ok', [any()], position()}). -endif. @@ -79,43 +82,50 @@ read(FileHdl, TotalSize) -> KO -> KO end. +scan_fun(Guid, TotalSize, Offset, _Msg) -> + {Guid, TotalSize, Offset}. + scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0). + scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/4). + +scan(FileHdl, FileSize, Fun) when FileSize >= 0 -> + scan(FileHdl, FileSize, <<>>, 0, [], 0, Fun). -scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset) -> +scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> {ok, Acc, ScanOffset}; -scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset) -> +scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset, Fun) -> Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]), case file_handle_cache:read(FileHdl, Read) of {ok, Data1} -> {Data2, Acc1, ScanOffset1} = - scan(<>, Acc, ScanOffset), + scanner(<>, Acc, ScanOffset, Fun), ReadOffset1 = ReadOffset + size(Data1), - scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1); + scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1, Fun); _KO -> {ok, Acc, ScanOffset} end. -scan(<<>>, Acc, Offset) -> - {<<>>, Acc, Offset}; -scan(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset) -> - {<<>>, Acc, Offset}; %% Nothing to do other than stop. -scan(<>, Acc, Offset) -> - TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, - case WriteMarker of - ?WRITE_OK_MARKER -> - %% Here we take option 5 from - %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the Guid as a number, and then convert it - %% back to a binary in order to work around bugs in - %% Erlang's GC. - <> = - <>, - <> = <>, - scan(Rest, [{Guid, TotalSize, Offset} | Acc], Offset + TotalSize); - _ -> - scan(Rest, Acc, Offset + TotalSize) - end; -scan(Data, Acc, Offset) -> - {Data, Acc, Offset}. +scanner(<<>>, Acc, Offset, _Fun) -> + {<<>>, Acc, Offset}; +scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset, _Fun) -> + {<<>>, Acc, Offset}; %% Nothing to do other than stop. +scanner(<>, Acc, Offset, Fun) -> + TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, + case WriteMarker of + ?WRITE_OK_MARKER -> + %% Here we take option 5 from + %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in + %% which we read the Guid as a number, and then convert it + %% back to a binary in order to work around bugs in + %% Erlang's GC. + <> = + <>, + <> = <>, + scanner(Rest, [Fun(Guid, TotalSize, Offset, Msg) | Acc], + Offset + TotalSize, Fun); + _ -> + scanner(Rest, Acc, Offset + TotalSize, Fun) + end; +scanner(Data, Acc, Offset, _Fun) -> + {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index e9c356e1..bd8d61e8 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,16 +26,20 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal +-export([transform_dir/3, force_recovery/2]). %% upgrade + -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). %%---------------------------------------------------------------------------- -include("rabbit_msg_store.hrl"). 
+-include_lib("kernel/include/file.hrl"). -define(SYNC_INTERVAL, 5). %% milliseconds -define(CLEAN_FILENAME, "clean.dot"). -define(FILE_SUMMARY_FILENAME, "file_summary.ets"). +-define(TRANSFORM_TMP, "transform_tmp"). -define(BINARY_MODE, [raw, binary]). -define(READ_MODE, [read]). @@ -160,6 +164,10 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). +-spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). +-spec(transform_dir/3 :: (file:filename(), server(), + fun ((binary())->({'ok', msg()} | {error, any()}))) -> + non_neg_integer()). -endif. @@ -1956,3 +1964,57 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {got, FinalOffsetZ}, {destination, Destination}]} end. + +force_recovery(BaseDir, Server) -> + Dir = filename:join(BaseDir, atom_to_list(Server)), + file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + [file:delete(filename:join(Dir, File)) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], + ok. + +transform_dir(BaseDir, Server, TransformFun) -> + Dir = filename:join(BaseDir, atom_to_list(Server)), + TmpDir = filename:join(Dir, ?TRANSFORM_TMP), + case filelib:is_dir(TmpDir) of + true -> throw({error, previously_failed_transform}); + false -> + Count = lists:sum( + [transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)]), + [file:delete(filename:join(Dir, File)) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], + [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || + File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + [file:delete(filename:join(TmpDir, File)) || + File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + ok = file:del_dir(TmpDir), + Count + end. + +transform_msg_file(FileOld, FileNew, TransformFun) -> + rabbit_misc:ensure_parent_dirs_exist(FileNew), + {ok, #file_info{size=Size}} = file:read_file_info(FileOld), + {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), + {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], + [{write_buffer, + ?HANDLE_CACHE_BUFFER_SIZE}]), + {ok, Acc, Size} = + rabbit_msg_file:scan( + RefOld, Size, + fun(Guid, _Size, _Offset, BinMsg) -> + case TransformFun(BinMsg) of + {ok, MsgNew} -> + rabbit_msg_file:append(RefNew, Guid, MsgNew), + 1; + {error, Reason} -> + error_logger:error_msg("Message transform failed: ~p~n", + [Reason]), + 0 + end + end), + file_handle_cache:close(RefOld), + file_handle_cache:close(RefNew), + lists:sum(Acc). + diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 68b88b3e..f4e27cc8 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -25,6 +25,7 @@ -rabbit_upgrade({add_ip_to_listener, []}). -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). +-rabbit_upgrade({multiple_routing_keys, []}). %% ------------------------------------------------------------------- @@ -35,6 +36,7 @@ -spec(add_ip_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). +-spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -101,3 +103,20 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. 
+ +%%-------------------------------------------------------------------- + +multiple_routing_keys() -> + _UpgradeMsgCount = rabbit_variable_queue:transform_storage( + fun (BinMsg) -> + case binary_to_term(BinMsg) of + {basic_message, ExchangeName, Routing_Key, Content, Guid, + Persistent} -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + _ -> + {error, corrupt_message} + end + end), + ok. + diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7142d560..f2176c0e 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1]). + status/1, transform_storage/1]). -export([start/1, stop/0]). @@ -1801,3 +1801,16 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> push_betas_to_deltas( Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. + +%%---------------------------------------------------------------------------- +%% Upgrading +%%---------------------------------------------------------------------------- + +%% Assumes message store is not running +transform_storage(TransformFun) -> + transform_store(?PERSISTENT_MSG_STORE, TransformFun) + + transform_store(?TRANSIENT_MSG_STORE, TransformFun). + +transform_store(Store, TransformFun) -> + rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), + rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). -- cgit v1.2.1 From 131e0bcdad6b6ecaa82ae807ec033a289c937179 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 17:28:13 +0000 Subject: rabbit_msg_file:scan/4 now looks a bit more like fold Also ignore garbage at the end of a message store --- src/rabbit_msg_file.erl | 20 ++++++++++---------- src/rabbit_msg_store.erl | 31 ++++++++++++++----------------- src/rabbit_upgrade_functions.erl | 2 +- src/rabbit_variable_queue.erl | 2 +- 4 files changed, 26 insertions(+), 29 deletions(-) diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index ad87ee16..9d5953d5 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2, scan/3]). +-export([append/3, read/2, scan/2, scan/4]). %%---------------------------------------------------------------------------- @@ -48,9 +48,9 @@ -spec(scan/2 :: (io_device(), file_size()) -> {'ok', [{rabbit_guid:guid(), msg_size(), position()}], position()}). --spec(scan/3 :: (io_device(), file_size(), - fun ((rabbit_guid:guid(), msg_size(), position(), binary()) -> any())) -> - {'ok', [any()], position()}). +-spec(scan/4 :: (io_device(), file_size(), + fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), + A) -> {'ok', A, position()}). -endif. @@ -82,14 +82,14 @@ read(FileHdl, TotalSize) -> KO -> KO end. -scan_fun(Guid, TotalSize, Offset, _Msg) -> - {Guid, TotalSize, Offset}. +scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> + [{Guid, TotalSize, Offset} | Acc]. scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/4). + scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/2). -scan(FileHdl, FileSize, Fun) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, Fun). +scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> + scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). 
scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> {ok, Acc, ScanOffset}; @@ -122,7 +122,7 @@ scanner(<> = <>, <> = <>, - scanner(Rest, [Fun(Guid, TotalSize, Offset, Msg) | Acc], + scanner(Rest, Fun({Guid, TotalSize, Offset, Msg}, Acc), Offset + TotalSize, Fun); _ -> scanner(Rest, Acc, Offset + TotalSize, Fun) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index bd8d61e8..b827eba9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -166,8 +166,7 @@ -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). -spec(transform_dir/3 :: (file:filename(), server(), - fun ((binary())->({'ok', msg()} | {error, any()}))) -> - non_neg_integer()). + fun ((binary()) -> ({'ok', msg()} | {error, any()}))) -> 'ok'). -endif. @@ -1976,21 +1975,19 @@ transform_dir(BaseDir, Server, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Server)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), case filelib:is_dir(TmpDir) of - true -> throw({error, previously_failed_transform}); + true -> throw({error, transform_failed_previously}); false -> - Count = lists:sum( - [transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)]), + [transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) || + File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], [file:delete(filename:join(Dir, File)) || File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], [file:delete(filename:join(TmpDir, File)) || File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], - ok = file:del_dir(TmpDir), - Count + ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> @@ -2000,21 +1997,21 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), - {ok, Acc, Size} = + {ok, Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, Size, - fun(Guid, _Size, _Offset, BinMsg) -> + fun({Guid, _Size, _Offset, BinMsg}, ok) -> case TransformFun(BinMsg) of {ok, MsgNew} -> - rabbit_msg_file:append(RefNew, Guid, MsgNew), - 1; + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok; {error, Reason} -> error_logger:error_msg("Message transform failed: ~p~n", [Reason]), - 0 + ok end - end), + end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), - lists:sum(Acc). + ok = Acc. 
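After this commit, scan is a fold: the caller supplies both the callback and the initial accumulator, and the traversal no longer cares what is being built. Any accumulator works; for instance, a hedged sketch that merely counts the valid messages in a store file, relying only on the scan/4 signature introduced above (the handle has to come from file_handle_cache, as in the store code):

    -module(scan_fold_demo).
    -export([count_messages/2]).

    count_messages(Hdl, FileSize) ->
        {ok, Count, _ScanOffset} =
            rabbit_msg_file:scan(Hdl, FileSize,
                                 fun ({_Guid, _Size, _Offset, _Msg}, N) ->
                                         N + 1
                                 end,
                                 0),
        Count.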
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index f4e27cc8..73f59557 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -107,7 +107,7 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> %%-------------------------------------------------------------------- multiple_routing_keys() -> - _UpgradeMsgCount = rabbit_variable_queue:transform_storage( + rabbit_variable_queue:transform_storage( fun (BinMsg) -> case binary_to_term(BinMsg) of {basic_message, ExchangeName, Routing_Key, Content, Guid, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index f2176c0e..dee6a8e5 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1808,7 +1808,7 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> %% Assumes message store is not running transform_storage(TransformFun) -> - transform_store(?PERSISTENT_MSG_STORE, TransformFun) + + transform_store(?PERSISTENT_MSG_STORE, TransformFun), transform_store(?TRANSIENT_MSG_STORE, TransformFun). transform_store(Store, TransformFun) -> -- cgit v1.2.1 From a62685c1495b2e95f2e127ab607ec1634a18cc62 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 11 Feb 2011 17:56:09 +0000 Subject: Remove rabbit_msg_file:scan/2 --- src/rabbit_msg_file.erl | 11 +---------- src/rabbit_msg_store.erl | 6 +++++- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 9d5953d5..81f2f07e 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -16,7 +16,7 @@ -module(rabbit_msg_file). --export([append/3, read/2, scan/2, scan/4]). +-export([append/3, read/2, scan/4]). %%---------------------------------------------------------------------------- @@ -45,9 +45,6 @@ -spec(read/2 :: (io_device(), msg_size()) -> rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, any())). --spec(scan/2 :: (io_device(), file_size()) -> - {'ok', [{rabbit_guid:guid(), msg_size(), position()}], - position()}). -spec(scan/4 :: (io_device(), file_size(), fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), A) -> {'ok', A, position()}). @@ -82,12 +79,6 @@ read(FileHdl, TotalSize) -> KO -> KO end. -scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> - [{Guid, TotalSize, Offset} | Acc]. - -scan(FileHdl, FileSize) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, [], 0, fun scan_fun/2). - scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index b827eba9..82fb1735 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1530,7 +1530,8 @@ scan_file_for_valid_messages(Dir, FileName) -> case open_file(Dir, FileName, ?READ_MODE) of {ok, Hdl} -> Valid = rabbit_msg_file:scan( Hdl, filelib:file_size( - form_filename(Dir, FileName))), + form_filename(Dir, FileName)), + fun scan_fun/2, []), %% if something really bad has happened, %% the close could fail, but ignore file_handle_cache:close(Hdl), @@ -1539,6 +1540,9 @@ scan_file_for_valid_messages(Dir, FileName) -> {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} end. +scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> + [{Guid, TotalSize, Offset} | Acc]. + %% Takes the list in *ascending* order (i.e. eldest message %% first). This is the opposite of what scan_file_for_valid_messages %% produces. The list of msgs that is produced is youngest first. 
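Dropping scan/2 loses nothing, because the old list-building behaviour is just one particular fold, which is exactly what the scan_fun/2 now living in rabbit_msg_store expresses. Written out as a stand-alone wrapper (illustrative module name):

    -module(scan_compat_demo).
    -export([scan_to_list/2]).

    %% Equivalent of the removed rabbit_msg_file:scan/2: returns
    %% {ok, [{Guid, TotalSize, Offset}], ScanOffset}, accumulated in
    %% reverse scan order.
    scan_to_list(Hdl, FileSize) ->
        rabbit_msg_file:scan(Hdl, FileSize,
                             fun ({Guid, TotalSize, Offset, _Msg}, Acc) ->
                                     [{Guid, TotalSize, Offset} | Acc]
                             end,
                             []).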
-- cgit v1.2.1 From 2d91f7b8e01c19f8e1e81199eb9fedf9ef485333 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sat, 12 Feb 2011 20:59:39 +0000 Subject: Added documentation for gm 'become' callback result --- include/gm_specs.hrl | 2 +- src/gm.erl | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 987866db..2109d15d 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -16,7 +16,7 @@ -ifdef(use_specs). --type(callback_result() :: 'ok' | {'stop', any()}). +-type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}). -type(args() :: [any()]). -type(members() :: [pid()]). diff --git a/src/gm.erl b/src/gm.erl index 8fea9196..283b2431 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -432,6 +432,20 @@ behaviour_info(callbacks) -> [ + %% The joined, members_changed and handle_msg callbacks can all + %% return any of the following terms: + %% + %% 'ok' - the callback function returns normally + %% + %% {'stop', Reason} - the callback indicates the member should + %% stop with reason Reason and should leave the group. + %% + %% {'become', Module, Args} - the callback indicates that the + %% callback module should be changed to Module and that the + %% callback functions should now be passed the arguments + %% Args. This allows the callback module to be dynamically + %% changed. + %% Called when we've successfully joined the group. Supplied with %% Args provided in start_link, plus current group members. {joined, 2}, -- cgit v1.2.1 From 0b4ffb33067b778ebbe30fd2c4b0b9f9160c18c3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:08:43 +0000 Subject: Be explicit where we can be --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f7befebc..e7da6a43 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -373,7 +373,7 @@ init_db(ClusterNodes, Force) -> {[], false} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[], _} -> + {[], true} -> %% We're the first node up ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade(local) of -- cgit v1.2.1 From 1c45e13da167b1cc01992521089efda440d29f65 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:39:05 +0000 Subject: Cosmetic --- src/rabbit_upgrade.erl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f279029a..bd3e829c 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -49,19 +49,19 @@ maybe_upgrade_mnesia() -> rabbit:prepare(), Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of - [_|_] = Upgrades -> - case am_i_upgrader(Nodes) of - true -> primary_upgrade(Upgrades, Nodes); - false -> non_primary_upgrade(Nodes) - end; - [] -> - ok; version_not_available -> case Nodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " "< 2.1.1.~n Unfortunately you will need to " "rebuild the cluster.", []) + end; + [] -> + ok; + Upgrades -> + case am_i_upgrader(Nodes) of + true -> primary_upgrade(Upgrades, Nodes); + false -> non_primary_upgrade(Nodes) end end. 
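Although the last commit above is titled Cosmetic, the clause order in maybe_upgrade_mnesia now carries weight: the old final clause matched [_|_] = Upgrades and so could never capture the atom version_not_available, whereas the rewritten clause binds a bare variable, which would swallow the atom if it were tried first. The same point in miniature:

    -module(clause_order_demo).
    -export([classify/1]).

    %% The specific atom has to be matched before the bare-variable clause;
    %% swapping the first and last clauses would send the atom down the
    %% "list of upgrades" path.
    classify(version_not_available) -> not_known;
    classify([])                    -> nothing_to_do;
    classify(Upgrades)              -> {run, Upgrades}.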
-- cgit v1.2.1 From 650217882d88c150663e17b8e5a9a8ce4f59f9a4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Feb 2011 13:59:39 +0000 Subject: inlining --- src/rabbit_upgrade.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index bd3e829c..c8d2ae87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -66,8 +66,7 @@ maybe_upgrade_mnesia() -> end. am_i_upgrader(Nodes) -> - Running = nodes_running(Nodes), - case Running of + case nodes_running(Nodes) of [] -> case am_i_disc_node() of true -> true; -- cgit v1.2.1 From 394c73b033ca71d98b0572317852b107abe97a38 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 14 Feb 2011 16:57:16 +0000 Subject: Sender-selected distribution updates --- src/rabbit_basic.erl | 2 +- src/rabbit_msg_store.erl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 5ea145d4..7fa68882 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -101,7 +101,7 @@ strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} = DecodedContent, Key) when Headers =/= undefined -> case lists:keyfind(Key, 1, Headers) of false -> DecodedContent; - Tuple -> Headers0 = lists:delete(Tuple, Headers), + Found -> Headers0 = lists:delete(Found, Headers), rabbit_binary_generator:clear_encoded_content( DecodedContent#content{ properties = Props#'P_basic'{headers = Headers0}}) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 82fb1735..f7afbef5 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2001,7 +2001,7 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), - {ok, Acc, _IgnoreSize} = + {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, Size, fun({Guid, _Size, _Offset, BinMsg}, ok) -> @@ -2017,5 +2017,5 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), - ok = Acc. + ok. -- cgit v1.2.1 From 94910f541801e40958d81bf7dbee15923c1a4c2a Mon Sep 17 00:00:00 2001 From: Vlad Ionescu Date: Mon, 14 Feb 2011 11:38:21 -0600 Subject: fixing database upgrade --- src/rabbit_upgrade_functions.erl | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 68b88b3e..4f679483 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -25,6 +25,7 @@ -rabbit_upgrade({add_ip_to_listener, []}). -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). +-rabbit_upgrade({topic_trie, []}). %% ------------------------------------------------------------------- @@ -47,7 +48,7 @@ %% point. remove_user_scope() -> - mnesia( + transform( rabbit_user_permission, fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) -> {user_permission, UV, {permission, Conf, Write, Read}} @@ -55,7 +56,7 @@ remove_user_scope() -> [user_vhost, permission]). hash_passwords() -> - mnesia( + transform( rabbit_user, fun ({user, Username, Password, IsAdmin}) -> Hash = rabbit_auth_backend_internal:hash_password(Password), @@ -64,7 +65,7 @@ hash_passwords() -> [username, password_hash, is_admin]). 
add_ip_to_listener() -> - mnesia( + transform( rabbit_listener, fun ({listener, Node, Protocol, Host, Port}) -> {listener, Node, Protocol, Host, {0,0,0,0}, Port} @@ -77,27 +78,41 @@ internal_exchanges() -> fun ({exchange, Name, Type, Durable, AutoDelete, Args}) -> {exchange, Name, Type, Durable, AutoDelete, false, Args} end, - [ ok = mnesia(T, + [ ok = transform(T, AddInternalFun, [name, type, durable, auto_delete, internal, arguments]) || T <- Tables ], ok. user_to_internal_user() -> - mnesia( + transform( rabbit_user, fun({user, Username, PasswordHash, IsAdmin}) -> {internal_user, Username, PasswordHash, IsAdmin} end, [username, password_hash, is_admin], internal_user). +topic_trie() -> + create(rabbit_topic_trie_edge, + [{record_name, topic_trie_edge}, + {attributes, {trie_edge, exchange_name, node_id, word}}, + {type, ordered_set}]), + create(rabbit_topic_trie_binding, + [{record_name, topic_trie_binding}, + {attributes, {trie_binding, exchange_name, node_id, destination}}, + {type, ordered_set}]). + %%-------------------------------------------------------------------- -mnesia(TableName, Fun, FieldList) -> +transform(TableName, Fun, FieldList) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList), ok. -mnesia(TableName, Fun, FieldList, NewRecordName) -> +transform(TableName, Fun, FieldList, NewRecordName) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. + +create(Tab, TabDef) -> + {atomic, ok} = mnesia:create_table(Tab, TabDef), + ok. -- cgit v1.2.1 From f01d24451a6f53313eacf82d2971d6eebe83bb55 Mon Sep 17 00:00:00 2001 From: Vlad Ionescu Date: Mon, 14 Feb 2011 11:53:00 -0600 Subject: fixing field lists --- src/rabbit_upgrade_functions.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 4f679483..a1ccb121 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -36,6 +36,7 @@ -spec(add_ip_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). +-spec(topic_trie/0 :: () -> 'ok'). -endif. @@ -79,8 +80,8 @@ internal_exchanges() -> {exchange, Name, Type, Durable, AutoDelete, false, Args} end, [ ok = transform(T, - AddInternalFun, - [name, type, durable, auto_delete, internal, arguments]) + AddInternalFun, + [name, type, durable, auto_delete, internal, arguments]) || T <- Tables ], ok. @@ -95,11 +96,11 @@ user_to_internal_user() -> topic_trie() -> create(rabbit_topic_trie_edge, [{record_name, topic_trie_edge}, - {attributes, {trie_edge, exchange_name, node_id, word}}, + {attributes, {topic_trie_edge, trie_edge, node_id}}, {type, ordered_set}]), create(rabbit_topic_trie_binding, [{record_name, topic_trie_binding}, - {attributes, {trie_binding, exchange_name, node_id, destination}}, + {attributes, {topic_trie_binding, trie_binding, value = const}}, {type, ordered_set}]). 
%%-------------------------------------------------------------------- -- cgit v1.2.1 From 91323e269a0e9f9fff5c5b4cbdeb43e3b6c9014c Mon Sep 17 00:00:00 2001 From: Vlad Ionescu Date: Mon, 14 Feb 2011 12:07:19 -0600 Subject: fixing attributes in rabbit_upgrade_functions:topic_trie/0 --- src/rabbit_upgrade_functions.erl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index a1ccb121..36d1f2dc 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -94,14 +94,12 @@ user_to_internal_user() -> [username, password_hash, is_admin], internal_user). topic_trie() -> - create(rabbit_topic_trie_edge, - [{record_name, topic_trie_edge}, - {attributes, {topic_trie_edge, trie_edge, node_id}}, - {type, ordered_set}]), - create(rabbit_topic_trie_binding, - [{record_name, topic_trie_binding}, - {attributes, {topic_trie_binding, trie_binding, value = const}}, - {type, ordered_set}]). + create(rabbit_topic_trie_edge, [{record_name, topic_trie_edge}, + {attributes, [trie_edge, node_id]}, + {type, ordered_set}]), + create(rabbit_topic_trie_binding, [{record_name, topic_trie_binding}, + {attributes, [trie_binding, value]}, + {type, ordered_set}]). %%-------------------------------------------------------------------- -- cgit v1.2.1 From 3f86e7afaf4bfe8f66e81ad96f491de48f159d84 Mon Sep 17 00:00:00 2001 From: Vlad Ionescu Date: Mon, 14 Feb 2011 12:49:43 -0600 Subject: fixing upgrade from schema with missing tables --- src/rabbit_mnesia.erl | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9bebae4b..c6441b68 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -552,9 +552,17 @@ create_local_table_copy(Tab, Type) -> end, ok. -wait_for_replicated_tables() -> wait_for_tables(replicated_table_names()). - -wait_for_tables() -> wait_for_tables(table_names()). +wait_for_replicated_tables() -> + AllTablesSet = ordsets:from_list(mnesia:system_info(tables)), + ReplicatedTablesSet = ordsets:from_list(replicated_table_names()), + wait_for_tables(ordsets:to_list(ordsets:intersection(AllTablesSet, + ReplicatedTablesSet))). + +wait_for_tables() -> + AllTablesSet = ordsets:from_list(mnesia:system_info(tables)), + RabbitTablesSet = ordsets:from_list(table_names()), + wait_for_tables(ordsets:to_list(ordsets:intersection(AllTablesSet, + RabbitTablesSet))). wait_for_tables(TableNames) -> case mnesia:wait_for_tables(TableNames, 30000) of -- cgit v1.2.1 From 3263bbd984306d328d7d1c1f0314bce56cc6c0da Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Mon, 14 Feb 2011 13:19:56 -0600 Subject: removing duplication and use of sets in rabbit_mnesia --- src/rabbit_mnesia.erl | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c6441b68..51b6c6a9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -553,19 +553,14 @@ create_local_table_copy(Tab, Type) -> ok. wait_for_replicated_tables() -> - AllTablesSet = ordsets:from_list(mnesia:system_info(tables)), - ReplicatedTablesSet = ordsets:from_list(replicated_table_names()), - wait_for_tables(ordsets:to_list(ordsets:intersection(AllTablesSet, - ReplicatedTablesSet))). + wait_for_tables(replicated_table_names()). 
wait_for_tables() -> - AllTablesSet = ordsets:from_list(mnesia:system_info(tables)), - RabbitTablesSet = ordsets:from_list(table_names()), - wait_for_tables(ordsets:to_list(ordsets:intersection(AllTablesSet, - RabbitTablesSet))). + wait_for_tables(table_names()). wait_for_tables(TableNames) -> - case mnesia:wait_for_tables(TableNames, 30000) of + Inexistent = TableNames -- mnesia:system_info(tables), + case mnesia:wait_for_tables(TableNames -- Inexistent, 30000) of ok -> ok; {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); -- cgit v1.2.1 From afe476dc81752d9708fc07c48390f438989f27b1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 12:29:33 +0000 Subject: Move Protocol to after vhost consistently --- src/rabbit_channel.erl | 12 ++++++------ src/rabbit_channel_sup.erl | 14 +++++++------- src/rabbit_direct.erl | 14 +++++++------- src/rabbit_reader.erl | 4 ++-- src/rabbit_tests.erl | 10 ++++++---- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b9d1baf0..12a668ad 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -67,8 +67,8 @@ -type(channel_number() :: non_neg_integer()). -spec(start_link/8 :: - (rabbit_types:protocol(), channel_number(), pid(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid(), + (channel_number(), pid(), pid(), rabbit_types:user(), + rabbit_types:vhost(), rabbit_types:protocol(), pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). -spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). @@ -95,11 +95,11 @@ %%---------------------------------------------------------------------------- -start_link(Protocol, Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, +start_link(Channel, ReaderPid, WriterPid, User, VHost, Protocol, CollectorPid, StartLimiterFun) -> gen_server2:start_link(?MODULE, - [Protocol, Channel, ReaderPid, WriterPid, User, - VHost, CollectorPid, StartLimiterFun], []). + [Channel, ReaderPid, WriterPid, User, VHost, + Protocol, CollectorPid, StartLimiterFun], []). do(Pid, Method) -> do(Pid, Method, none). @@ -153,7 +153,7 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Protocol, Channel, ReaderPid, WriterPid, User, VHost, CollectorPid, +init([Channel, ReaderPid, WriterPid, User, VHost, Protocol, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 9bc0546c..f528a9c6 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -31,11 +31,11 @@ -export_type([start_link_args/0]). -type(start_link_args() :: - {'tcp', rabbit_types:protocol(), rabbit_net:socket(), - rabbit_channel:channel_number(), non_neg_integer(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid()} | - {'direct', rabbit_types:protocol(), rabbit_channel:channel_number(), - pid(), rabbit_types:user(), rabbit_types:vhost(), pid()}). + {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(), + non_neg_integer(), pid(), rabbit_types:user(), rabbit_types:vhost(), + rabbit_types:protocol(), pid()} | + {'direct', rabbit_channel:channel_number(), pid(), rabbit_types:user(), + rabbit_types:vhost(), rabbit_types:protocol(), pid()}). -spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}). 
@@ -43,7 +43,7 @@ %%---------------------------------------------------------------------------- -start_link({tcp, Protocol, Sock, Channel, FrameMax, ReaderPid, User, VHost, +start_link({tcp, Sock, Channel, FrameMax, ReaderPid, User, VHost, Protocol, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, WriterPid} = @@ -61,7 +61,7 @@ start_link({tcp, Protocol, Sock, Channel, FrameMax, ReaderPid, User, VHost, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Protocol, Channel, ClientChannelPid, User, VHost, +start_link({direct, Channel, ClientChannelPid, User, VHost, Protocol, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 3b8c9fba..8ee7aafc 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/3, start_channel/5]). +-export([boot/0, connect/3, start_channel/6]). -include("rabbit.hrl"). @@ -28,9 +28,9 @@ -spec(connect/3 :: (binary(), binary(), binary()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). --spec(start_channel/5 :: (rabbit_channel:channel_number(), pid(), - rabbit_types:user(), rabbit_types:vhost(), pid()) -> - {'ok', pid()}). +-spec(start_channel/6 :: (rabbit_channel:channel_number(), pid(), + rabbit_types:user(), rabbit_types:vhost(), + rabbit_types:protocol(), pid()) -> {'ok', pid()}). -endif. @@ -67,9 +67,9 @@ connect(Username, Password, VHost) -> {error, broker_not_found_on_node} end. -start_channel(Number, ClientChannelPid, User, VHost, Collector) -> +start_channel(Number, ClientChannelPid, User, VHost, Protocol, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( - rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, User, VHost, Collector}]), + rabbit_direct_client_sup, [{direct, Number, ClientChannelPid, User, + VHost, Protocol, Collector}]), {ok, ChannelPid}. 
diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index a9403105..f54d52e5 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -852,8 +852,8 @@ send_to_new_channel(Channel, AnalyzedFrame, State) -> vhost = VHost}} = State, {ok, _ChSupPid, {ChPid, AState}} = rabbit_channel_sup_sup:start_channel( - ChanSupSup, {tcp, Protocol, Sock, Channel, FrameMax, self(), User, - VHost, Collector}), + ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), User, VHost, + Protocol, Collector}), MRef = erlang:monitor(process, ChPid), NewAState = process_channel_frame(AnalyzedFrame, self(), Channel, ChPid, AState), diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 45a11766..f176dee9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1020,8 +1020,9 @@ test_server_status() -> %% create a few things so there is some useful information to list Writer = spawn(fun () -> receive shutdown -> ok end end), {ok, Ch} = rabbit_channel:start_link( - rabbit_framing_amqp_0_9_1, 1, self(), Writer, user(<<"user">>), - <<"/">>, self(), fun (_) -> {ok, self()} end), + 1, self(), Writer, user(<<"user">>), <<"/">>, + rabbit_framing_amqp_0_9_1, self(), + fun (_) -> {ok, self()} end), [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], {new, Queue = #amqqueue{}} <- [rabbit_amqqueue:declare( @@ -1080,8 +1081,9 @@ test_spawn(Receiver) -> Me = self(), Writer = spawn(fun () -> Receiver(Me) end), {ok, Ch} = rabbit_channel:start_link( - rabbit_framing_amqp_0_9_1, 1, Me, Writer, user(<<"guest">>), - <<"/">>, self(), fun (_) -> {ok, self()} end), + 1, Me, Writer, user(<<"guest">>), <<"/">>, + rabbit_framing_amqp_0_9_1, self(), + fun (_) -> {ok, self()} end), ok = rabbit_channel:do(Ch, #'channel.open'{}), receive #'channel.open_ok'{} -> ok after 1000 -> throw(failed_to_receive_channel_open_ok) -- cgit v1.2.1 From 74b1a058173948328d6b1ad7565ab2d82b840848 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 12:36:40 +0000 Subject: /Consistently/... --- src/rabbit_channel_sup.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index f528a9c6..fdaabdfb 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -56,7 +56,7 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, User, VHost, Protocol, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Protocol, Channel, ReaderPid, WriterPid, User, VHost, + [Channel, ReaderPid, WriterPid, User, VHost, Protocol, Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), @@ -68,8 +68,8 @@ start_link({direct, Channel, ClientChannelPid, User, VHost, Protocol, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Protocol, Channel, ClientChannelPid, ClientChannelPid, - User, VHost, Collector, start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, User, + VHost, Protocol, Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. 
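After these reorderings every call site places Protocol between VHost and the collector pid. A hedged usage sketch for the direct case, matching the start_channel/6 spec above (User and Collector stand in for values obtained elsewhere; channel number and virtual host are arbitrary examples):

    %% Illustrative call of the reordered rabbit_direct:start_channel/6.
    {ok, ChPid} = rabbit_direct:start_channel(
                    1, self(), User, <<"/">>, rabbit_framing_amqp_0_9_1,
                    Collector).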
-- cgit v1.2.1 From 75a45fa69fd42ba91aaca751102e8f53cfad75f5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Feb 2011 13:51:58 +0000 Subject: 404 => NOT_FOUND --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index f176dee9..9f12be7c 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1307,7 +1307,7 @@ test_queue_cleanup(_SecondaryNode) -> rabbit_channel:do(Ch, #'queue.declare'{ passive = true, queue = ?CLEANUP_QUEUE_NAME }), receive - #'channel.close'{reply_code = 404} -> + #'channel.close'{reply_code = ?NOT_FOUND} -> ok after 2000 -> throw(failed_to_receive_channel_exit) -- cgit v1.2.1 From 545aa642f2ce2218948a9786d0638637a72a2768 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Tue, 15 Feb 2011 09:20:07 -0600 Subject: adding wait_for_tables after database upgrade --- src/rabbit_mnesia.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 51b6c6a9..eac7dd14 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -388,7 +388,8 @@ init_db(ClusterNodes, Force) -> %% True single disc node, attempt upgrade ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade() of - ok -> ensure_schema_ok(); + ok -> ok = wait_for_tables(), + ensure_schema_ok(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> @@ -559,8 +560,8 @@ wait_for_tables() -> wait_for_tables(table_names()). wait_for_tables(TableNames) -> - Inexistent = TableNames -- mnesia:system_info(tables), - case mnesia:wait_for_tables(TableNames -- Inexistent, 30000) of + Nonexistent = TableNames -- mnesia:system_info(tables), + case mnesia:wait_for_tables(TableNames -- Nonexistent, 30000) of ok -> ok; {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); -- cgit v1.2.1 From 17ebfb85ebc28c01dfc29e7089dbbf6d1688bc6c Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 15 Feb 2011 15:27:52 +0000 Subject: Sender-specified distribution updates --- include/rabbit_backing_queue_spec.hrl | 4 +--- src/rabbit_basic.erl | 41 +++++++++++++++++++++------------- src/rabbit_channel.erl | 36 +++++++++++++++++------------- src/rabbit_msg_store.erl | 42 +++++++++++++++++++++++------------ src/rabbit_upgrade_functions.erl | 19 ---------------- src/rabbit_variable_queue.erl | 18 ++++++++++++++- 6 files changed, 93 insertions(+), 67 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 52ffd413..17cdedc2 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,6 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(transform_storage/1 :: - (fun ((binary()) -> (rabbit_types:ok_or_error2(any(), any())))) -> - non_neg_integer()). +-spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 7fa68882..503f01bc 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -31,6 +31,7 @@ -type(publish_result() :: ({ok, rabbit_router:routing_result(), [pid()]} | rabbit_types:error('not_found'))). +-type(msg_or_error() :: {'ok', rabbit_types:message()} | {'error', any()}). -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). @@ -40,10 +41,10 @@ rabbit_types:delivery()). 
-spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> rabbit_types:message()). + properties_input(), binary()) -> msg_or_error()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> rabbit_types:message()). + rabbit_types:decoded_content()) -> msg_or_error()). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -111,17 +112,23 @@ strip_header(DecodedContent, _Key) -> message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> - #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | header_routes(Props#'P_basic'.headers)]}. - -message(ExchangeName, RoutingKeyBin, RawProperties, BodyBin) -> + try + {ok, #basic_message{ + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} + catch + {error, _Reason} = Error -> Error + end. + +message(ExchangeName, RoutingKey, RawProperties, BodyBin) -> Properties = properties(RawProperties), Content = build_content(Properties, BodyBin), - message(ExchangeName, RoutingKeyBin, Content). + {ok, Msg} = message(ExchangeName, RoutingKey, Content), + Msg. properties(P = #'P_basic'{}) -> P; @@ -170,8 +177,12 @@ is_message_persistent(#content{properties = #'P_basic'{ header_routes(undefined) -> []; header_routes(HeadersTable) -> - lists:append([case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {array, Routes} -> [Route || {longstr, Route} <- Routes]; - _ -> [] - end || HeaderKey <- ?ROUTING_HEADERS]). + lists:append( + [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + undefined -> []; + {Type, _Val} -> throw({error, {unacceptable_type_in_header, + Type, + binary_to_list(HeaderKey)}}) + end || HeaderKey <- ?ROUTING_HEADERS]). 
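header_routes/1 now throws {error, {unacceptable_type_in_header, ...}} when a routing header is present but its value is not an array, and message/3 surfaces that as an {error, Reason} return. A hedged sketch of a headers field table that would pass the check; <<"CC">> is assumed here to be one of the keys in ?ROUTING_HEADERS, whose definition is not part of this hunk:

    %% Illustrative headers field table: an array of longstr routing keys
    %% under an assumed ?ROUTING_HEADERS key.
    Headers = [{<<"CC">>, array, [{longstr, <<"extra-queue-1">>},
                                  {longstr, <<"extra-queue-2">>}]}].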
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 16a3911d..162580ec 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -555,21 +555,27 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, true -> SeqNo = State#ch.publish_seqno, {SeqNo, State#ch{publish_seqno = SeqNo + 1}} end, - Message = rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent), - {RoutingRes, DeliveredQPids} = - rabbit_exchange:publish( - Exchange, - rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, - MsgSeqNo)), - State2 = process_routing_result(RoutingRes, DeliveredQPids, ExchangeName, - MsgSeqNo, Message, State1), - maybe_incr_stats([{ExchangeName, 1} | - [{{QPid, ExchangeName}, 1} || - QPid <- DeliveredQPids]], publish, State2), - {noreply, case TxnKey of - none -> State2; - _ -> add_tx_participants(DeliveredQPids, State2) - end}; + case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of + {ok, Message} -> + {RoutingRes, DeliveredQPids} = + rabbit_exchange:publish( + Exchange, + rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, + MsgSeqNo)), + State2 = process_routing_result(RoutingRes, DeliveredQPids, + ExchangeName, MsgSeqNo, Message, + State1), + maybe_incr_stats([{ExchangeName, 1} | + [{{QPid, ExchangeName}, 1} || + QPid <- DeliveredQPids]], publish, State2), + {noreply, case TxnKey of + none -> State2; + _ -> add_tx_participants(DeliveredQPids, State2) + end}; + {error, Reason} -> + rabbit_misc:protocol_error(precondition_failed, + "invalid message: ~p", [Reason]) + end; handle_method(#'basic.nack'{delivery_tag = DeliveryTag, multiple = Multiple, diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index f7afbef5..00c2ab18 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1968,29 +1968,43 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. -force_recovery(BaseDir, Server) -> - Dir = filename:join(BaseDir, atom_to_list(Server)), +force_recovery(BaseDir, Store) -> + Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), [file:delete(filename:join(Dir, File)) || File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -transform_dir(BaseDir, Server, TransformFun) -> - Dir = filename:join(BaseDir, atom_to_list(Server)), +for_each_file(Files, Fun) -> + [Fun(File) || File <- Files]. 
+ +transform_dir(BaseDir, Store, TransformFun) -> + Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> - [transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], - [file:delete(filename:join(Dir, File)) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION)], - [file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) || - File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], - [file:delete(filename:join(TmpDir, File)) || - File <- list_sorted_file_names(TmpDir, ?FILE_EXTENSION)], + OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + for_each_file(OldFileList, + fun (File) -> + transform_msg_file(filename:join(Dir, File), + filename:join(TmpDir, File), + TransformFun) + end), + for_each_file(OldFileList, + fun (File) -> + file:delete(filename:join(Dir, File)) + end), + NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), + for_each_file(NewFileList, + fun (File) -> + file:copy(filename:join(TmpDir, File), + filename:join(Dir, File)) + end), + for_each_file(NewFileList, + fun (File) -> + file:delete(filename:join(TmpDir, File)) + end), ok = file:del_dir(TmpDir) end. diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 73f59557..68b88b3e 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -25,7 +25,6 @@ -rabbit_upgrade({add_ip_to_listener, []}). -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). --rabbit_upgrade({multiple_routing_keys, []}). %% ------------------------------------------------------------------- @@ -36,7 +35,6 @@ -spec(add_ip_to_listener/0 :: () -> 'ok'). -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). --spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -103,20 +101,3 @@ mnesia(TableName, Fun, FieldList, NewRecordName) -> {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. - -%%-------------------------------------------------------------------- - -multiple_routing_keys() -> - rabbit_variable_queue:transform_storage( - fun (BinMsg) -> - case binary_to_term(BinMsg) of - {basic_message, ExchangeName, Routing_Key, Content, Guid, - Persistent} -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - _ -> - {error, corrupt_message} - end - end), - ok. - diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index dee6a8e5..b0781f8f 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, transform_storage/1]). + status/1, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -294,6 +294,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). 
@@ -1806,6 +1808,20 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> %% Upgrading %%---------------------------------------------------------------------------- +multiple_routing_keys() -> + transform_storage( + fun (BinMsg) -> + case binary_to_term(BinMsg) of + {basic_message, ExchangeName, Routing_Key, Content, Guid, + Persistent} -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + _ -> + {error, corrupt_message} + end + end), + ok. + %% Assumes message store is not running transform_storage(TransformFun) -> transform_store(?PERSISTENT_MSG_STORE, TransformFun), -- cgit v1.2.1 From 944de8b5e3aec103afc672666bbf6044e8379016 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 15 Feb 2011 15:56:04 +0000 Subject: Swapped helper function arguments --- src/rabbit_msg_store.erl | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 00c2ab18..a9d1e210 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,7 +1975,7 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(Files, Fun) -> +for_each_file(Fun, Files) -> [Fun(File) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> @@ -1985,26 +1985,22 @@ transform_dir(BaseDir, Store, TransformFun) -> true -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(OldFileList, - fun (File) -> + for_each_file(fun (File) -> transform_msg_file(filename:join(Dir, File), filename:join(TmpDir, File), TransformFun) - end), - for_each_file(OldFileList, - fun (File) -> + end, OldFileList), + for_each_file(fun (File) -> file:delete(filename:join(Dir, File)) - end), + end, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(NewFileList, - fun (File) -> + for_each_file(fun (File) -> file:copy(filename:join(TmpDir, File), filename:join(Dir, File)) - end), - for_each_file(NewFileList, - fun (File) -> + end, NewFileList), + for_each_file(fun (File) -> file:delete(filename:join(TmpDir, File)) - end), + end, NewFileList), ok = file:del_dir(TmpDir) end. -- cgit v1.2.1 From f300f1594b4224a4c20e1f39a138f3471f6e469e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 11:25:59 +0000 Subject: Shorten transform_dir. --- src/rabbit_msg_store.erl | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a9d1e210..5bfd48fb 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,32 +1975,27 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(Fun, Files) -> - [Fun(File) || File <- Files]. +for_each_file(D) -> + fun(Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files] end. + +for_each_file(D1, D2) -> + fun(Fun, Files) -> [Fun(filename:join(D1, File), + filename:join(D2, File)) || File <- Files] end. 
transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), + TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of - true -> throw({error, transform_failed_previously}); + true -> + throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(fun (File) -> - transform_msg_file(filename:join(Dir, File), - filename:join(TmpDir, File), - TransformFun) - end, OldFileList), - for_each_file(fun (File) -> - file:delete(filename:join(Dir, File)) - end, OldFileList), + (for_each_file(Dir, TmpDir))(TransformFile, OldFileList), + (for_each_file(Dir) )(fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(fun (File) -> - file:copy(filename:join(TmpDir, File), - filename:join(Dir, File)) - end, NewFileList), - for_each_file(fun (File) -> - file:delete(filename:join(TmpDir, File)) - end, NewFileList), + (for_each_file(TmpDir, Dir))(fun file:copy/2, NewFileList), + (for_each_file(TmpDir) )(fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. -- cgit v1.2.1 From 789f49a33719c34d11c4385e67e46ac6bc081617 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 11:43:14 +0000 Subject: Matthias points out this does not need to be second order. --- src/rabbit_msg_store.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 5bfd48fb..fd3027e9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1975,12 +1975,11 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(D) -> - fun(Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files] end. +for_each_file(D, Fun, Files) -> + [Fun(filename:join(D, File)) || File <- Files]. -for_each_file(D1, D2) -> - fun(Fun, Files) -> [Fun(filename:join(D1, File), - filename:join(D2, File)) || File <- Files] end. +for_each_file(D1, D2, Fun, Files) -> + [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), @@ -1991,11 +1990,11 @@ transform_dir(BaseDir, Store, TransformFun) -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - (for_each_file(Dir, TmpDir))(TransformFile, OldFileList), - (for_each_file(Dir) )(fun file:delete/1, OldFileList), + for_each_file(Dir, TmpDir, TransformFile, OldFileList), + for_each_file(Dir, fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - (for_each_file(TmpDir, Dir))(fun file:copy/2, NewFileList), - (for_each_file(TmpDir) )(fun file:delete/1, NewFileList), + for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), + for_each_file(TmpDir, fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. 
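Throughout these revisions the TransformFun threaded into transform_dir/3 keeps the same shape: it receives the stored message binary and returns {ok, NewTerm} or {error, Reason}, as in the multiple_routing_keys upgrade earlier in this series. A hedged, do-nothing example of such a fun, shown only to make that contract concrete:

    %% Sketch: a transform fun of the shape expected by transform_storage/1
    %% and, via transform_msg_file/3, by transform_dir/3. It decodes the
    %% stored binary and passes the message through unchanged.
    identity_transform(BinMsg) ->
        case binary_to_term(BinMsg) of
            Msg = {basic_message, _XName, _RoutingKeys, _Content, _Guid,
                   _IsPersistent} -> {ok, Msg};
            _                     -> {error, corrupt_message}
        end.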
-- cgit v1.2.1 From 46b79df30bb8eac1905dfd2cc73e760f753b99f6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 12:03:22 +0000 Subject: Aesthetics --- src/rabbit_msg_store.erl | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index fd3027e9..3d7411a9 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1986,16 +1986,14 @@ transform_dir(BaseDir, Store, TransformFun) -> TmpDir = filename:join(Dir, ?TRANSFORM_TMP), TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of - true -> - throw({error, transform_failed_previously}); - false -> - OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(Dir, TmpDir, TransformFile, OldFileList), - for_each_file(Dir, fun file:delete/1, OldFileList), - NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), - for_each_file(TmpDir, fun file:delete/1, NewFileList), - ok = file:del_dir(TmpDir) + true -> throw({error, transform_failed_previously}); + false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + for_each_file(Dir, TmpDir, TransformFile, OldFileList), + for_each_file(Dir, fun file:delete/1, OldFileList), + NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), + for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), + for_each_file(TmpDir, fun file:delete/1, NewFileList), + ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> -- cgit v1.2.1 From c34c4592aaf59da9771a6a9a51de076d11da90a8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 16:46:39 +0000 Subject: Revert where rabbit:prepare happens. --- src/rabbit_prelaunch.erl | 2 ++ src/rabbit_upgrade.erl | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 612aec80..3283e8fd 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -235,6 +235,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. 
+process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> + [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; process_entry(Entry) -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c8d2ae87..73b9bb0e 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -46,7 +46,6 @@ %% ------------------------------------------------------------------- maybe_upgrade_mnesia() -> - rabbit:prepare(), Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of version_not_available -> -- cgit v1.2.1 From 6cae135624ca1ae276ec89066593fb11683021d5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 16:49:07 +0000 Subject: Rename --- src/rabbit_upgrade.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 73b9bb0e..da735b83 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -58,17 +58,17 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> - case am_i_upgrader(Nodes) of - true -> primary_upgrade(Upgrades, Nodes); - false -> non_primary_upgrade(Nodes) + case upgrade_mode(Nodes) of + primary -> primary_upgrade(Upgrades, Nodes); + secondary -> non_primary_upgrade(Nodes) end end. -am_i_upgrader(Nodes) -> +upgrade_mode(Nodes) -> case nodes_running(Nodes) of [] -> case am_i_disc_node() of - true -> true; + true -> primary; false -> die("Cluster upgrade needed but this is a ram " "node.~n Please start any of the disc nodes " "first.", []) @@ -85,7 +85,7 @@ am_i_upgrader(Nodes) -> ClusterVersion -> %% The other node(s) have upgraded already, I am not the %% upgrader - false; + secondary; MyVersion -> %% The other node(s) are running an unexpected version. die("Cluster upgrade needed but other nodes are " -- cgit v1.2.1 From b978524f5b06030cda66634a9e17cdca7dcb4fb7 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 17:08:08 +0000 Subject: Prose --- src/rabbit_upgrade.erl | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index da735b83..0fdb973b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -45,6 +45,47 @@ %% ------------------------------------------------------------------- +%% The upgrade logic is quite involved, due to the existence of +%% clusters. +%% +%% Firstly, we have two different types of upgrades to do: Mnesia and +%% everythinq else. Mnesia upgrades need to only be done by one node +%% in the cluster (we treat a non-clustered node as a single-node +%% cluster). This is the primary upgrader. The other upgrades need to +%% be done by all nodes. +%% +%% The primary upgrader has to start first (and do its Mnesia +%% upgrades). Secondary upgraders need to reset their Mnesia database +%% and then rejoin the cluster. They can't do the Mnesia upgrades as +%% well and then merge databases since the cookie for each table will +%% end up different and the merge will fail. +%% +%% This in turn means that we need to determine whether we are the +%% primary or secondary upgrader *before* Mnesia comes up. If we +%% didn't then the secondary upgrader would try to start Mnesia, and +%% either hang waiting for a node which is not yet up, or fail since +%% its schema differs from the other nodes in the cluster. 
+%% +%% Also, the primary upgrader needs to start Mnesia to do its +%% upgrades, but needs to forcibly load tables rather than wait for +%% them (in case it was not the last node to shut down, in which case +%% it would wait forever). +%% +%% This in turn means that maybe_upgrade_mnesia/0 has to be patched +%% into the boot process by prelaunch before the mnesia application is +%% started. By the time Mnesia is started the upgrades have happened +%% (on the primary), or Mnesia has been reset (on the secondary) and +%% rabbit_mnesia:init_db/2 can then make the node rejoin the clister +%% in the normal way. +%% +%% The non-mnesia upgrades are then triggered by +%% rabbit_mnesia:init_db/2. Of course, it's possible for a given +%% upgrade process to only require Mnesia upgrades, or only require +%% non-Mnesia upgrades. In the latter case no Mnesia resets and +%% reclusterings occur. + +%% ------------------------------------------------------------------- + maybe_upgrade_mnesia() -> Nodes = rabbit_mnesia:all_clustered_nodes(), case upgrades_required(mnesia) of -- cgit v1.2.1 From ea73a62e8b8a86883fb8683d7f61a5693a519f46 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Feb 2011 17:58:58 +0000 Subject: (Untested) Record the nodes that were up when we shut down. --- src/rabbit.erl | 1 + src/rabbit_mnesia.erl | 36 +++++++++++++++++++++++++++++++++++- src/rabbit_upgrade.erl | 46 +++++++++++++++++++++++++++++++++++++--------- 3 files changed, 73 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1beed5c1..ffb6610d 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -203,6 +203,7 @@ start() -> end. stop() -> + rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e7da6a43..3f7fc0d8 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -21,7 +21,9 @@ cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, - create_cluster_nodes_config/1, read_cluster_nodes_config/0]). + create_cluster_nodes_config/1, read_cluster_nodes_config/0, + record_running_disc_nodes/0, read_previous_run_disc_nodes/0, + delete_previous_run_disc_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -57,6 +59,10 @@ -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). +-spec(record_running_disc_nodes/0 :: () -> 'ok'). +-spec(read_previous_run_disc_nodes/0 :: () -> [node()]). +-spec(delete_previous_run_disc_nodes/0 :: () -> 'ok'). +-spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -349,6 +355,34 @@ delete_cluster_nodes_config() -> FileName, Reason}}) end. +running_nodes_filename() -> + dir() ++ "/nodes_running_at_shutdown". + +record_running_disc_nodes() -> + FileName = running_nodes_filename(), + Nodes = rabbit_mnesia:nodes_of_type(disc_copies) -- [node()], + %% Don't check the result: we're shutting down anyway and this is + %% a best-effort-basis. + rabbit_misc:write_term_file(FileName, [Nodes]). + +read_previous_run_disc_nodes() -> + FileName = running_nodes_filename(), + case rabbit_misc:read_term_file(FileName) of + {ok, [Nodes]} -> Nodes; + {error, enoent} -> []; + {error, Reason} -> throw({error, {cannot_read_previous_nodes_file, + FileName, Reason}}) + end. 
+ +delete_previous_run_disc_nodes() -> + FileName = running_nodes_filename(), + case file:delete(FileName) of + ok -> ok; + {error, enoent} -> ok; + {error, Reason} -> throw({error, {cannot_delete_previous_nodes_file, + FileName, Reason}}) + end. + %% Take a cluster node config and create the right kind of node - a %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0fdb973b..23770686 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -49,8 +49,8 @@ %% clusters. %% %% Firstly, we have two different types of upgrades to do: Mnesia and -%% everythinq else. Mnesia upgrades need to only be done by one node -%% in the cluster (we treat a non-clustered node as a single-node +%% everythinq else. Mnesia upgrades must only be done by one node in +%% the cluster (we treat a non-clustered node as a single-node %% cluster). This is the primary upgrader. The other upgrades need to %% be done by all nodes. %% @@ -75,7 +75,7 @@ %% into the boot process by prelaunch before the mnesia application is %% started. By the time Mnesia is started the upgrades have happened %% (on the primary), or Mnesia has been reset (on the secondary) and -%% rabbit_mnesia:init_db/2 can then make the node rejoin the clister +%% rabbit_mnesia:init_db/2 can then make the node rejoin the cluster %% in the normal way. %% %% The non-mnesia upgrades are then triggered by @@ -83,6 +83,22 @@ %% upgrade process to only require Mnesia upgrades, or only require %% non-Mnesia upgrades. In the latter case no Mnesia resets and %% reclusterings occur. +%% +%% The primary upgrader needs to be a disc node. Ideally we would like +%% it to be the last disc node to shut down (since otherwise there's a +%% risk of data loss). On each node we therefore record the disc nodes +%% that were still running when we shut down. A disc node that knows +%% other nodes were up when it shut down, or a ram node, will refuse +%% to be the primary upgrader, and will thus not start when upgrades +%% are needed. +%% +%% However, this is racy if several nodes are shut down at once. Since +%% rabbit records the running nodes, and shuts down before mnesia, the +%% race manifests as all disc nodes thinking they are not the primary +%% upgrader. Therefore the user can remove the record of the last disc +%% node to shut down to get things going again. This may lose any +%% mnesia changes that happened after the node chosen as the primary +%% upgrader was shut down. %% ------------------------------------------------------------------- @@ -103,16 +119,28 @@ maybe_upgrade_mnesia() -> primary -> primary_upgrade(Upgrades, Nodes); secondary -> non_primary_upgrade(Nodes) end - end. + end, + ok = rabbit_mnesia:delete_previous_run_disc_nodes(). upgrade_mode(Nodes) -> case nodes_running(Nodes) of [] -> - case am_i_disc_node() of - true -> primary; - false -> die("Cluster upgrade needed but this is a ram " - "node.~n Please start any of the disc nodes " - "first.", []) + AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), + case {am_i_disc_node(), AfterUs} of + {true, []} -> + primary; + {true, _} -> + Filename = rabbit_mnesia:running_nodes_filename(), + die("Cluster upgrade needed but other disc nodes shut " + "down after this one.~n Please start one of the " + "disc nodes: ~p first.~n~n Note: if several disc " + "nodes were shut down simultaneously they may all " + "show this message. 
In which case, remove ~s on one " + "of them and start that.", [AfterUs, Filename]); + {false, _} -> + die("Cluster upgrade needed but this is a ram " + "node.~n Please start one of the disc nodes: " + "~p first.", [AfterUs]) end; [Another|_] -> ClusterVersion = -- cgit v1.2.1 From b30f89113d57a303c52739712408440d75605532 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:11:45 +0000 Subject: Oops, that's not exported. --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 3f7fc0d8..8acb0b02 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -360,7 +360,7 @@ running_nodes_filename() -> record_running_disc_nodes() -> FileName = running_nodes_filename(), - Nodes = rabbit_mnesia:nodes_of_type(disc_copies) -- [node()], + Nodes = nodes_of_type(disc_copies) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]). -- cgit v1.2.1 From 11a1cc6eac6bd4cde6bd971763348d0384ec2520 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:39:45 +0000 Subject: Fix our idea of which nodes were running when we shut down. --- src/rabbit_mnesia.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8acb0b02..367eb6f8 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -360,7 +360,10 @@ running_nodes_filename() -> record_running_disc_nodes() -> FileName = running_nodes_filename(), - Nodes = nodes_of_type(disc_copies) -- [node()], + Nodes = sets:to_list( + sets:intersection( + sets:from_list(nodes_of_type(disc_copies)), + sets:from_list(running_clustered_nodes()))) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]). -- cgit v1.2.1 From a20039bacdf9f9ca06b82e7673a6a423318fb269 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:45:21 +0000 Subject: Make the error messages more readable. --- src/rabbit_upgrade.erl | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 23770686..0c2e4bce 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -109,7 +109,7 @@ maybe_upgrade_mnesia() -> case Nodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " - "< 2.1.1.~n Unfortunately you will need to " + "< 2.1.1.~nUnfortunately you will need to " "rebuild the cluster.", []) end; [] -> @@ -132,15 +132,19 @@ upgrade_mode(Nodes) -> {true, _} -> Filename = rabbit_mnesia:running_nodes_filename(), die("Cluster upgrade needed but other disc nodes shut " - "down after this one.~n Please start one of the " - "disc nodes: ~p first.~n~n Note: if several disc " - "nodes were shut down simultaneously they may all " - "show this message. In which case, remove ~s on one " - "of them and start that.", [AfterUs, Filename]); + "down after this one.~nPlease first start the last " + "disc node to shut down.~nThe disc nodes that were " + "still running when this one shut down are:~n~n" + " ~p~n~nNote: if several disc nodes were shut down " + "simultaneously they may all~nshow this message. " + "In which case, remove the lock file on one of them " + "and~nstart that node. 
The lock file on this node " + "is:~n~n ~s ", + [AfterUs, Filename]); {false, _} -> - die("Cluster upgrade needed but this is a ram " - "node.~n Please start one of the disc nodes: " - "~p first.", [AfterUs]) + die("Cluster upgrade needed but this is a ram node.~n" + "Please first start the last disc node to shut down.", + []) end; [Another|_] -> ClusterVersion = @@ -176,7 +180,7 @@ die(Msg, Args) -> %% straight out into do_boot, generating an erl_crash.dump %% and displaying any error message in a confusing way. error_logger:error_msg(Msg, Args), - io:format("~n~n** " ++ Msg ++ " **~n~n~n", Args), + io:format("~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args), error_logger:logfile(close), halt(1). -- cgit v1.2.1 From f673f3919cad23798116ca2f63de64a5b36b03b4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Feb 2011 10:58:18 +0000 Subject: Retain ram-nodeness when upgrading. --- src/rabbit_upgrade.erl | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0c2e4bce..56dab3e9 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -103,10 +103,11 @@ %% ------------------------------------------------------------------- maybe_upgrade_mnesia() -> - Nodes = rabbit_mnesia:all_clustered_nodes(), + AllNodes = rabbit_mnesia:all_clustered_nodes(), + KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> - case Nodes of + case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " "< 2.1.1.~nUnfortunately you will need to " @@ -115,18 +116,18 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> - case upgrade_mode(Nodes) of - primary -> primary_upgrade(Upgrades, Nodes); - secondary -> non_primary_upgrade(Nodes) + case upgrade_mode(AllNodes, KnownDiscNodes) of + primary -> primary_upgrade(Upgrades, AllNodes); + secondary -> secondary_upgrade(KnownDiscNodes) end end, ok = rabbit_mnesia:delete_previous_run_disc_nodes(). -upgrade_mode(Nodes) -> - case nodes_running(Nodes) of +upgrade_mode(AllNodes, KnownDiscNodes) -> + case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(), AfterUs} of + case {am_i_disc_node(KnownDiscNodes), AfterUs} of {true, []} -> primary; {true, _} -> @@ -167,10 +168,10 @@ upgrade_mode(Nodes) -> end end. -am_i_disc_node() -> +am_i_disc_node(KnownDiscNodes) -> %% The cluster config does not list all disc nodes, but it will list us %% if we're one. - case rabbit_mnesia:read_cluster_nodes_config() of + case KnownDiscNodes of [] -> true; DiscNodes -> lists:member(node(), DiscNodes) end. @@ -204,10 +205,10 @@ primary_upgrade(Upgrades, Nodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -non_primary_upgrade(Nodes) -> +secondary_upgrade(KnownDiscNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(Nodes), + ok = rabbit_mnesia:create_cluster_nodes_config(KnownDiscNodes), write_version(mnesia), ok. 
-- cgit v1.2.1 From 2bf5a24342c350511ec5ec9de6a1f1c1e8496e64 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 12:01:49 +0000 Subject: Implement try-restart and condrestart as intended --- packaging/common/rabbitmq-server.init | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index c1647dc5..93c9e0e7 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -101,6 +101,16 @@ rotate_logs_rabbitmq() { set -e } +restart_running_rabbitmq () { + status_rabbitmq quiet + if [ $RETVAL = 0 ] ; then + restart_rabbitmq + else + echo RabbitMQ is not runnning + RETVAL=0 + fi +} + restart_rabbitmq() { stop_rabbitmq start_rabbitmq @@ -124,11 +134,16 @@ case "$1" in echo -n "Rotating log files for $DESC: " rotate_logs_rabbitmq ;; - force-reload|reload|restart|condrestart|try-restart) + force-reload|reload|restart) echo -n "Restarting $DESC: " restart_rabbitmq echo "$NAME." ;; + condrestart|try-restart) + echo -n "Restarting $DESC: " + restart_running_rabbitmq + echo "$NAME." + ;; *) echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2 RETVAL=1 -- cgit v1.2.1 From e5f82cb1200cb97b097306ab35d26250afdda777 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 13:33:43 +0000 Subject: Remove condrestart target --- packaging/common/rabbitmq-server.init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 93c9e0e7..916dee6f 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -139,7 +139,7 @@ case "$1" in restart_rabbitmq echo "$NAME." ;; - condrestart|try-restart) + try-restart) echo -n "Restarting $DESC: " restart_running_rabbitmq echo "$NAME." -- cgit v1.2.1 From 1bafead0212d17e41198121a83ed44ea1bd506b8 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:01:03 +0000 Subject: Maybe monitor queues on consume, maybe unmonitor on cancel --- src/rabbit_channel.erl | 65 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index a6790b6c..346ec371 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -33,9 +33,9 @@ start_limiter_fun, transaction_id, tx_participants, next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, - consumer_mapping, blocking, queue_collector_pid, stats_timer, - confirm_enabled, publish_seqno, unconfirmed, confirmed, - capabilities}). + consumer_mapping, blocking, consumer_monitors, queue_collector_pid, + stats_timer, confirm_enabled, publish_seqno, unconfirmed, + confirmed, capabilities}). -define(MAX_PERMISSION_CACHE_SIZE, 12). 
@@ -171,6 +171,7 @@ init([Channel, ReaderPid, WriterPid, User, VHost, Capabilities, CollectorPid, most_recently_declared_queue = <<>>, consumer_mapping = dict:new(), blocking = dict:new(), + consumer_monitors = dict:new(), queue_collector_pid = CollectorPid, stats_timer = StatsTimer, confirm_enabled = false, @@ -646,9 +647,11 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid, - limiter_pid = LimiterPid, - consumer_mapping = ConsumerMapping }) -> + _, State = #ch{reader_pid = ReaderPid, + limiter_pid = LimiterPid, + consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + capabilities = Capabilities}) -> case dict:find(ConsumerTag, ConsumerMapping) of error -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), @@ -665,18 +668,31 @@ handle_method(#'basic.consume'{queue = QueueNameBin, case rabbit_amqqueue:with_exclusive_access_or_die( QueueName, ReaderPid, fun (Q) -> - rabbit_amqqueue:basic_consume( - Q, NoAck, self(), LimiterPid, - ActualConsumerTag, ExclusiveConsume, - ok_msg(NoWait, #'basic.consume_ok'{ - consumer_tag = ActualConsumerTag})) + {rabbit_amqqueue:basic_consume( + Q, NoAck, self(), LimiterPid, + ActualConsumerTag, ExclusiveConsume, + ok_msg(NoWait, #'basic.consume_ok'{ + consumer_tag = ActualConsumerTag})), + Q#amqqueue.pid} end) of - ok -> + {ok, QPid} -> + {ConsumerMonitors1, MRef} = + case rabbit_misc:table_lookup( + Capabilities, + <<"consumer_death_notification">>) of + {bool, true} -> + MRef1 = erlang:monitor(process, QPid), + {dict:store(MRef1, ActualConsumerTag, + ConsumerMonitors), MRef1}; + _ -> + {ConsumerMonitors, undefined} + end, {noreply, State#ch{consumer_mapping = dict:store(ActualConsumerTag, - QueueName, - ConsumerMapping)}}; - {error, exclusive_consume_unavailable} -> + {QueueName, MRef}, + ConsumerMapping), + consumer_monitors = ConsumerMonitors1}}; + {{error, exclusive_consume_unavailable}, _QPid} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", [rabbit_misc:rs(QueueName)]) @@ -689,16 +705,23 @@ handle_method(#'basic.consume'{queue = QueueNameBin, handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait}, - _, State = #ch{consumer_mapping = ConsumerMapping }) -> + _, State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors}) -> OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag}, case dict:find(ConsumerTag, ConsumerMapping) of error -> %% Spec requires we ignore this situation. 
return_ok(State, NoWait, OkMsg); - {ok, QueueName} -> - NewState = State#ch{consumer_mapping = - dict:erase(ConsumerTag, - ConsumerMapping)}, + {ok, {QueueName, MRef}} -> + ConsumerMonitors1 = + case MRef of + undefined -> ConsumerMonitors; + _ -> true = erlang:demonitor(MRef), + dict:erase(MRef, ConsumerMonitors) + end, + NewState = State#ch{consumer_mapping = dict:erase(ConsumerTag, + ConsumerMapping), + consumer_monitors = ConsumerMonitors1}, case rabbit_amqqueue:with( QueueName, fun (Q) -> @@ -1208,7 +1231,7 @@ limit_queues(LPid, #ch{consumer_mapping = Consumers}) -> consumer_queues(Consumers) -> [QPid || QueueName <- sets:to_list( - dict:fold(fun (_ConsumerTag, QueueName, S) -> + dict:fold(fun (_ConsumerTag, {QueueName, _MRef}, S) -> sets:add_element(QueueName, S) end, sets:new(), Consumers)), case rabbit_amqqueue:lookup(QueueName) of -- cgit v1.2.1 From eec95bf77fd15eb16e44e9938e3cd1c857105ec2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:20:01 +0000 Subject: Tidy up on DOWN and emit basic.cancel --- src/rabbit_channel.erl | 46 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 346ec371..1da8c959 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -281,20 +281,15 @@ handle_cast({confirm, MsgSeqNos, From}, State) -> handle_info(timeout, State) -> noreply(State); -handle_info({'DOWN', _MRef, process, QPid, Reason}, - State = #ch{unconfirmed = UC}) -> - %% TODO: this does a complete scan and partial rebuild of the - %% tree, which is quite efficient. To do better we'd need to - %% maintain a secondary mapping, from QPids to MsgSeqNos. - {MXs, UC1} = remove_queue_unconfirmed( - gb_trees:next(gb_trees:iterator(UC)), QPid, - {[], UC}, State), - erase_queue_stats(QPid), - State1 = case Reason of - normal -> record_confirms(MXs, State#ch{unconfirmed = UC1}); - _ -> send_nacks(MXs, State#ch{unconfirmed = UC1}) - end, - noreply(queue_blocked(QPid, State1)). +handle_info({'DOWN', MRef, process, QPid, Reason}, + State = #ch{consumer_monitors = ConsumerMonitors}) -> + noreply( + case dict:find(MRef, ConsumerMonitors) of + error -> + handle_non_consumer_down(QPid, Reason, State); + {ok, ConsumerTag} -> + handle_consumer_down(MRef, ConsumerTag, State) + end). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> ok = clear_permission_cache(), @@ -1061,6 +1056,29 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- +handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed = UC}) -> + %% TODO: this does a complete scan and partial rebuild of the + %% tree, which is quite efficient. To do better we'd need to + %% maintain a secondary mapping, from QPids to MsgSeqNos. + {MXs, UC1} = remove_queue_unconfirmed( + gb_trees:next(gb_trees:iterator(UC)), QPid, + {[], UC}, State), + erase_queue_stats(QPid), + State1 = case Reason of + normal -> record_confirms(MXs, State#ch{unconfirmed = UC1}); + _ -> send_nacks(MXs, State#ch{unconfirmed = UC1}) + end, + queue_blocked(QPid, State1). + +handle_consumer_down(MRef, ConsumerTag, + State = #ch{consumer_monitors = ConsumerMonitors, + writer_pid = WriterPid}) -> + ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), + Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, + nowait = true}, + ok = rabbit_writer:send_command(WriterPid, Cancel), + State#ch{consumer_monitors = ConsumerMonitors1}. 
+ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, -- cgit v1.2.1 From 58947e087dc17246a3a973b7a912321ffe3e0804 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Feb 2011 17:32:55 +0000 Subject: Add to our own server_properties (informational only) --- src/rabbit_reader.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index e9ff97f9..be5a90af 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -249,9 +249,10 @@ server_properties(Protocol) -> NormalizedConfigServerProps). server_capabilities(rabbit_framing_amqp_0_9_1) -> - [{<<"publisher_confirms">>, bool, true}, - {<<"exchange_exchange_bindings">>, bool, true}, - {<<"basic.nack">>, bool, true}]; + [{<<"publisher_confirms">>, bool, true}, + {<<"exchange_exchange_bindings">>, bool, true}, + {<<"basic.nack">>, bool, true}, + {<<"consumer_death_notification">>, bool, true}]; server_capabilities(_) -> []. -- cgit v1.2.1 From 78dad489959a02769a7452ea9b6e604de9dfeec6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Feb 2011 12:32:12 +0000 Subject: Changes by ryandesign in r75667 of Macports' SVN. SVN commit msg was: rabbitmq-server: maintainer update to 2.3.1; see #28254 Additionally: 'I simplified how the manpages were installed (using "`xinstall -W`" to install multiple files to the same destination directory at once), and simplified how the checksums were specified (listing each filename only once).' --- packaging/macports/Portfile.in | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 862a0d1a..67ebcf78 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -23,12 +23,14 @@ distfiles ${name}-${version}${extract.suffix} \ ${name}-generic-unix-${version}${extract.suffix} checksums \ - ${name}-${version}${extract.suffix} md5 @md5-src@ \ - ${name}-${version}${extract.suffix} sha1 @sha1-src@ \ - ${name}-${version}${extract.suffix} rmd160 @rmd160-src@ \ - ${name}-generic-unix-${version}${extract.suffix} md5 @md5-bin@ \ - ${name}-generic-unix-${version}${extract.suffix} sha1 @sha1-bin@ \ - ${name}-generic-unix-${version}${extract.suffix} rmd160 @rmd160-bin@ + ${name}-${version}${extract.suffix} \ + md5 @md5-src@ \ + sha1 @sha1-src@ \ + rmd160 @rmd160-src@ \ + ${name}-generic-unix-${version}${extract.suffix} \ + md5 @md5-bin@ \ + sha1 @sha1-bin@ \ + rmd160 @rmd160-bin@ depends_lib port:erlang depends_build port:libxslt @@ -102,10 +104,8 @@ post-destroot { file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmq-server file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmqctl - file copy ${mansrc}/man1/rabbitmq-multi.1.gz ${mandest}/man1/ - file copy ${mansrc}/man1/rabbitmq-server.1.gz ${mandest}/man1/ - file copy ${mansrc}/man1/rabbitmqctl.1.gz ${mandest}/man1/ - file copy ${mansrc}/man5/rabbitmq-env.conf.5.gz ${mandest}/man5/ + xinstall -m 644 -W ${mansrc}/man1 rabbitmq-multi.1.gz rabbitmq-server.1.gz rabbitmqctl.1.gz ${mandest}/man1/ + xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ } pre-install { -- cgit v1.2.1 From d8c2900d40317202aa509ef18116c7058ddc7f16 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 17:42:56 +0000 Subject: Make the consumer mapping store the queues, not the queuenames --- src/rabbit_channel.erl | 47 
+++++++++++++++++------------------------------ 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 2fc19256..ff8ff800 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -680,9 +680,9 @@ handle_method(#'basic.consume'{queue = QueueNameBin, ActualConsumerTag, ExclusiveConsume, ok_msg(NoWait, #'basic.consume_ok'{ consumer_tag = ActualConsumerTag})), - Q#amqqueue.pid} + Q} end) of - {ok, QPid} -> + {ok, Q = #amqqueue{pid = QPid}} -> {ConsumerMonitors1, MRef} = case rabbit_misc:table_lookup( Capabilities, @@ -696,10 +696,10 @@ handle_method(#'basic.consume'{queue = QueueNameBin, end, {noreply, State#ch{consumer_mapping = dict:store(ActualConsumerTag, - {QueueName, MRef}, + {Q, MRef}, ConsumerMapping), consumer_monitors = ConsumerMonitors1}}; - {{error, exclusive_consume_unavailable}, _QPid} -> + {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", [rabbit_misc:rs(QueueName)]) @@ -719,7 +719,7 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, error -> %% Spec requires we ignore this situation. return_ok(State, NoWait, OkMsg); - {ok, {QueueName, MRef}} -> + {ok, {Q, MRef}} -> ConsumerMonitors1 = case MRef of undefined -> ConsumerMonitors; @@ -729,21 +729,15 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, NewState = State#ch{consumer_mapping = dict:erase(ConsumerTag, ConsumerMapping), consumer_monitors = ConsumerMonitors1}, - case rabbit_amqqueue:with( - QueueName, - fun (Q) -> - %% In order to ensure that no more messages - %% are sent to the consumer after the - %% cancel_ok has been sent, we get the - %% queue process to send the cancel_ok on - %% our behalf. If we were sending the - %% cancel_ok ourselves it might overtake a - %% message sent previously by the queue. - rabbit_amqqueue:basic_cancel( - Q, self(), ConsumerTag, - ok_msg(NoWait, #'basic.cancel_ok'{ - consumer_tag = ConsumerTag})) - end) of + %% In order to ensure that no more messages are sent to + %% the consumer after the cancel_ok has been sent, we get + %% the queue process to send the cancel_ok on our + %% behalf. If we were sending the cancel_ok ourselves it + %% might overtake a message sent previously by the queue. + case rabbit_amqqueue:basic_cancel( + Q, self(), ConsumerTag, + ok_msg(NoWait, #'basic.cancel_ok'{ + consumer_tag = ConsumerTag})) of ok -> {noreply, NewState}; {error, not_found} -> @@ -1262,16 +1256,9 @@ limit_queues(LPid, #ch{consumer_mapping = Consumers}) -> rabbit_amqqueue:limit_all(consumer_queues(Consumers), self(), LPid). consumer_queues(Consumers) -> - [QPid || QueueName <- - sets:to_list( - dict:fold(fun (_ConsumerTag, {QueueName, _MRef}, S) -> - sets:add_element(QueueName, S) - end, sets:new(), Consumers)), - case rabbit_amqqueue:lookup(QueueName) of - {ok, Q} -> QPid = Q#amqqueue.pid, true; - %% queue has been deleted in the meantime - {error, not_found} -> QPid = none, false - end]. + lists:usort([QPid || + {_Key, {#amqqueue{pid = QPid}, _MRef}} + <- dict:to_list(Consumers)]). 
%% tell the limiter about the number of acks that have been received %% for messages delivered to subscribed consumers, but not acks for -- cgit v1.2.1 From 0a841c886b0941b534de7f5fb32405e910c44173 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 18:28:16 +0000 Subject: Set up the monitor only when we see the basic.consume_ok coming back through otherwise there's a risk of sending out the cancel before we've sent out the consume_ok, which would surprise clients. This is further complicated by the fact NoWait with basic.consume... --- src/rabbit_channel.erl | 74 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 26 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index ff8ff800..b8788983 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -241,6 +241,11 @@ handle_cast({flushed, QPid}, State) -> handle_cast(terminate, State) -> {stop, normal, State}; +handle_cast({command, #'basic.consume_ok'{consumer_tag = ConsumerTag} = Msg}, + State = #ch{writer_pid = WriterPid}) -> + ok = rabbit_writer:send_command(WriterPid, Msg), + noreply(monitor_consumer(ConsumerTag, State)); + handle_cast({command, Msg}, State = #ch{writer_pid = WriterPid}) -> ok = rabbit_writer:send_command(WriterPid, Msg), noreply(State); @@ -656,9 +661,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, nowait = NoWait}, _, State = #ch{reader_pid = ReaderPid, limiter_pid = LimiterPid, - consumer_mapping = ConsumerMapping, - consumer_monitors = ConsumerMonitors, - capabilities = Capabilities}) -> + consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of error -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), @@ -682,23 +685,13 @@ handle_method(#'basic.consume'{queue = QueueNameBin, consumer_tag = ActualConsumerTag})), Q} end) of - {ok, Q = #amqqueue{pid = QPid}} -> - {ConsumerMonitors1, MRef} = - case rabbit_misc:table_lookup( - Capabilities, - <<"consumer_death_notification">>) of - {bool, true} -> - MRef1 = erlang:monitor(process, QPid), - {dict:store(MRef1, ActualConsumerTag, - ConsumerMonitors), MRef1}; - _ -> - {ConsumerMonitors, undefined} - end, - {noreply, State#ch{consumer_mapping = - dict:store(ActualConsumerTag, - {Q, MRef}, - ConsumerMapping), - consumer_monitors = ConsumerMonitors1}}; + {ok, Q} -> + State1 = State#ch{consumer_mapping = + dict:store(ActualConsumerTag, + {Q, undefined}, + ConsumerMapping)}, + {noreply, + maybe_monitor_consumer(NoWait, ActualConsumerTag, State1)}; {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -734,10 +727,14 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, %% the queue process to send the cancel_ok on our %% behalf. If we were sending the cancel_ok ourselves it %% might overtake a message sent previously by the queue. 
- case rabbit_amqqueue:basic_cancel( - Q, self(), ConsumerTag, - ok_msg(NoWait, #'basic.cancel_ok'{ - consumer_tag = ConsumerTag})) of + case rabbit_misc:with_exit_handler( + fun () -> {error, not_found} end, + fun () -> + rabbit_amqqueue:basic_cancel( + Q, self(), ConsumerTag, + ok_msg(NoWait, #'basic.cancel_ok'{ + consumer_tag = ConsumerTag})) + end) of ok -> {noreply, NewState}; {error, not_found} -> @@ -1062,6 +1059,28 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- +maybe_monitor_consumer(true, ConsumerTag, State) -> + monitor_consumer(ConsumerTag, State); +maybe_monitor_consumer(false, _ConsumerTag, State) -> + State. + +monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + capabilities = Capabilities}) -> + case {dict:find(ConsumerTag, ConsumerMapping), + rabbit_misc:table_lookup( + Capabilities, <<"consumer_death_notification">>)} of + {{ok, {#amqqueue{pid = QPid} = Q, undefined}}, {bool, true}} -> + MRef = erlang:monitor(process, QPid), + State#ch{consumer_mapping = + dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), + consumer_monitors = + dict:store(MRef, ConsumerTag, ConsumerMonitors)}; + _X -> + %% either already received the cancel or incapable client + State + end. + handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); @@ -1080,13 +1099,16 @@ handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> queue_blocked(QPid, State3). handle_consumer_down(MRef, ConsumerTag, - State = #ch{consumer_monitors = ConsumerMonitors, + State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, writer_pid = WriterPid}) -> + ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping), ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, nowait = true}, ok = rabbit_writer:send_command(WriterPid, Cancel), - State#ch{consumer_monitors = ConsumerMonitors1}. + State#ch{consumer_mapping = ConsumerMapping1, + consumer_monitors = ConsumerMonitors1}. binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, -- cgit v1.2.1 From b4c3bf3e2b677325d05b0cce115214f79aa362b9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Feb 2011 18:40:07 +0000 Subject: Fatal error --- src/rabbit_channel.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b8788983..ec3088dd 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1076,7 +1076,7 @@ monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), consumer_monitors = dict:store(MRef, ConsumerTag, ConsumerMonitors)}; - _X -> + _ -> %% either already received the cancel or incapable client State end. -- cgit v1.2.1 From 4584e37187aba68d53597605b71858350c2e6efa Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:34:39 +0000 Subject: Include LSB Default-Start and Default-Stop lines in init script Runlevel uses vary between distros; the runlevels given here correspond to the most common Linux conventions. In particular, runlevel 2 is in the stop list, because in most distros (Debian being a notable exception) it means that network services should not be started. 
--- packaging/common/rabbitmq-server.init | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index 39d23983..5a43be5d 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -10,8 +10,8 @@ # Provides: rabbitmq-server # Required-Start: $remote_fs $network # Required-Stop: $remote_fs $network -# Default-Start: -# Default-Stop: +# Default-Start: 3 4 5 +# Default-Stop: 0 1 2 6 # Description: RabbitMQ broker # Short-Description: Enable AMQP service provided by RabbitMQ broker ### END INIT INFO -- cgit v1.2.1 From e6b9eb1b1599c01ec553f7a1910834b7ee4fa0da Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:36:36 +0000 Subject: Debian needs unusual LSB Default-Start and Default-Stop lines For Debian and descendants, there is no distinction between runlevel 2 and 3, 4, and 5. So we need to modify the Default-Start and Default-Stop lines in the init script accordingly. --- packaging/debs/Debian/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile index ab05f732..221dbe4f 100644 --- a/packaging/debs/Debian/Makefile +++ b/packaging/debs/Debian/Makefile @@ -22,9 +22,13 @@ package: clean tar -zxvf $(DEBIAN_ORIG_TARBALL) cp -r debian $(UNPACKED_DIR) cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/ +# Debian and descendants differ from most other distros in that +# runlevel 2 should start network services. sed -i \ -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/default/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=|' \ + -e 's|^\(# Default-Start:\).*$$|\1 2 3 4 5|' \ + -e 's|^\(# Default-Stop:\).*$$|\1 0 1 6|' \ $(UNPACKED_DIR)/debian/rabbitmq-server.init sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ $(UNPACKED_DIR)/debian/rabbitmq-script-wrapper -- cgit v1.2.1 From 67f728639d1a0923cfab0952e437d5855a062a71 Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sat, 19 Feb 2011 23:37:54 +0000 Subject: Obey Fedora guidelines regarding Default-Start and Default-Stop Fedora guidelines say that non-vital packages should omit Default-Start and Default-Stop. 
--- packaging/RPMS/Fedora/Makefile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packaging/RPMS/Fedora/Makefile b/packaging/RPMS/Fedora/Makefile index 74a1800a..2c0f9a6c 100644 --- a/packaging/RPMS/Fedora/Makefile +++ b/packaging/RPMS/Fedora/Makefile @@ -12,7 +12,7 @@ ifndef RPM_OS RPM_OS=fedora endif -ifeq "x$(RPM_OS)" "xsuse" +ifeq "$(RPM_OS)" "suse" REQUIRES=/sbin/chkconfig /sbin/service OS_DEFINES=--define '_initrddir /etc/init.d' --define 'dist .suse' else @@ -34,6 +34,11 @@ prepare: -e 's|^DEFAULTS_FILE=.*$$|DEFAULTS_FILE=/etc/sysconfig/rabbitmq|' \ -e 's|^LOCK_FILE=.*$$|LOCK_FILE=/var/lock/subsys/$$NAME|' \ SOURCES/rabbitmq-server.init +ifeq "$(RPM_OS)" "fedora" +# Fedora says that only vital services should have Default-Start + sed -i -e '/^# Default-Start:/d;/^# Default-Stop:/d' \ + SOURCES/rabbitmq-server.init +endif sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \ SOURCES/rabbitmq-script-wrapper cp rabbitmq-server.logrotate SOURCES/rabbitmq-server.logrotate @@ -41,5 +46,5 @@ prepare: server: prepare rpmbuild -ba --nodeps SPECS/rabbitmq-server.spec $(DEFINES) $(OS_DEFINES) -clean: +clean: rm -rf SOURCES SPECS RPMS SRPMS BUILD tmp -- cgit v1.2.1 From dfb0d30bde61730a5b6ddd5a51a98a39d447cba6 Mon Sep 17 00:00:00 2001 From: David Wragg Date: Sun, 20 Feb 2011 00:11:18 +0000 Subject: Fix "File listed twice" warnings when building RPMs --- packaging/RPMS/Fedora/rabbitmq-server.spec | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 5d573bde..79c9607c 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -65,12 +65,8 @@ mkdir -p %{buildroot}%{_sysconfdir}/rabbitmq rm %{_maindir}/LICENSE %{_maindir}/LICENSE-MPL-RabbitMQ %{_maindir}/INSTALL #Build the list of files -rm -f %{_builddir}/%{name}.files -echo '%defattr(-,root,root, -)' >> %{_builddir}/%{name}.files -(cd %{buildroot}; \ - find . -type f ! -regex '\.%{_sysconfdir}.*' \ - ! -regex '\.\(%{_rabbit_erllibdir}\|%{_rabbit_libdir}\).*' \ - | sed -e 's/^\.//' >> %{_builddir}/%{name}.files) +echo '%defattr(-,root,root, -)' >%{_builddir}/%{name}.files +find %{buildroot} -path %{buildroot}%{_sysconfdir} -prune -o '!' -type d -printf "/%%P\n" >>%{_builddir}/%{name}.files %pre @@ -117,8 +113,6 @@ done %attr(0750, rabbitmq, rabbitmq) %dir %{_localstatedir}/lib/rabbitmq %attr(0750, rabbitmq, rabbitmq) %dir %{_localstatedir}/log/rabbitmq %dir %{_sysconfdir}/rabbitmq -%{_rabbit_erllibdir} -%{_rabbit_libdir} %{_initrddir}/rabbitmq-server %config(noreplace) %{_sysconfdir}/logrotate.d/rabbitmq-server %doc LICENSE LICENSE-MPL-RabbitMQ -- cgit v1.2.1 From 95f2121cc5e3da61960c06ab95258074a26b531b Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Sun, 20 Feb 2011 16:37:07 -0600 Subject: less wait_for_tables --- src/rabbit_mnesia.erl | 7 +++---- src/rabbit_upgrade.erl | 1 - src/rabbit_upgrade_functions.erl | 5 +++++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index eac7dd14..8b8f2f2d 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -20,7 +20,7 @@ -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, cluster/1, force_cluster/1, reset/0, force_reset/0, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, - empty_ram_only_tables/0, copy_db/1]). + empty_ram_only_tables/0, copy_db/1, wait_for_tables/1]). -export([table_names/0]). 
@@ -54,6 +54,7 @@ -spec(empty_ram_only_tables/0 :: () -> 'ok'). -spec(create_tables/0 :: () -> 'ok'). -spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())). +-spec(wait_for_tables/1 :: ([atom()]) -> 'ok'). -endif. @@ -386,7 +387,6 @@ init_db(ClusterNodes, Force) -> case {Nodes, mnesia:system_info(use_dir), all_clustered_nodes()} of {[], true, [_]} -> %% True single disc node, attempt upgrade - ok = wait_for_tables(), case rabbit_upgrade:maybe_upgrade() of ok -> ok = wait_for_tables(), ensure_schema_ok(); @@ -490,8 +490,7 @@ copy_db(Destination) -> mnesia:stop(), case rabbit_misc:recursive_copy(dir(), Destination) of ok -> - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = wait_for_tables(); + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia); {error, E} -> {error, E} end. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b0a71523..89acc10c 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -98,7 +98,6 @@ vertices(Module, Steps) -> edges(_Module, Steps) -> [{Require, StepName} || {StepName, Requires} <- Steps, Require <- Requires]. - unknown_heads(Heads, G) -> [H || H <- Heads, digraph:vertex(G, H) =:= false]. diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 36d1f2dc..d6a79590 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -49,6 +49,7 @@ %% point. remove_user_scope() -> + rabbit_mnesia:wait_for_tables([rabbit_user_permission]), transform( rabbit_user_permission, fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) -> @@ -57,6 +58,7 @@ remove_user_scope() -> [user_vhost, permission]). hash_passwords() -> + rabbit_mnesia:wait_for_tables([rabbit_user]), transform( rabbit_user, fun ({user, Username, Password, IsAdmin}) -> @@ -66,6 +68,7 @@ hash_passwords() -> [username, password_hash, is_admin]). add_ip_to_listener() -> + rabbit_mnesia:wait_for_tables([rabbit_listener]), transform( rabbit_listener, fun ({listener, Node, Protocol, Host, Port}) -> @@ -75,6 +78,7 @@ add_ip_to_listener() -> internal_exchanges() -> Tables = [rabbit_exchange, rabbit_durable_exchange], + rabbit_mnesia:wait_for_tables(Tables), AddInternalFun = fun ({exchange, Name, Type, Durable, AutoDelete, Args}) -> {exchange, Name, Type, Durable, AutoDelete, false, Args} @@ -86,6 +90,7 @@ internal_exchanges() -> ok. user_to_internal_user() -> + rabbit_mnesia:wait_for_tables([rabbit_user]), transform( rabbit_user, fun({user, Username, PasswordHash, IsAdmin}) -> -- cgit v1.2.1 From 5013d8833776d927e034b8dc270659bdc2fadd60 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Sun, 20 Feb 2011 16:44:56 -0600 Subject: moving wait_for_tables call in transform --- src/rabbit_upgrade_functions.erl | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index d6a79590..b9dbe418 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -49,7 +49,6 @@ %% point. remove_user_scope() -> - rabbit_mnesia:wait_for_tables([rabbit_user_permission]), transform( rabbit_user_permission, fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) -> @@ -58,7 +57,6 @@ remove_user_scope() -> [user_vhost, permission]). hash_passwords() -> - rabbit_mnesia:wait_for_tables([rabbit_user]), transform( rabbit_user, fun ({user, Username, Password, IsAdmin}) -> @@ -68,7 +66,6 @@ hash_passwords() -> [username, password_hash, is_admin]). 
add_ip_to_listener() -> - rabbit_mnesia:wait_for_tables([rabbit_listener]), transform( rabbit_listener, fun ({listener, Node, Protocol, Host, Port}) -> @@ -78,7 +75,6 @@ add_ip_to_listener() -> internal_exchanges() -> Tables = [rabbit_exchange, rabbit_durable_exchange], - rabbit_mnesia:wait_for_tables(Tables), AddInternalFun = fun ({exchange, Name, Type, Durable, AutoDelete, Args}) -> {exchange, Name, Type, Durable, AutoDelete, false, Args} @@ -90,7 +86,6 @@ internal_exchanges() -> ok. user_to_internal_user() -> - rabbit_mnesia:wait_for_tables([rabbit_user]), transform( rabbit_user, fun({user, Username, PasswordHash, IsAdmin}) -> @@ -109,10 +104,12 @@ topic_trie() -> %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> + rabbit_mnesia:wait_for_tables([TableName]), {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList), ok. transform(TableName, Fun, FieldList, NewRecordName) -> + rabbit_mnesia:wait_for_tables([TableName]), {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList, NewRecordName), ok. -- cgit v1.2.1 From be8cb807748f28021d38c62f158f095874d9d607 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Feb 2011 17:36:09 +0000 Subject: renames and refactors --- src/rabbit_channel.erl | 24 +++++++++++------------- src/rabbit_reader.erl | 8 ++++---- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index abda1c1f..28f3673d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -299,7 +299,7 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, noreply( case dict:find(MRef, ConsumerMonitors) of error -> - handle_non_consumer_down(QPid, Reason, State); + handle_queue_down(QPid, Reason, State); {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) end). @@ -717,7 +717,10 @@ handle_method(#'basic.consume'{queue = QueueNameBin, {Q, undefined}, ConsumerMapping)}, {noreply, - maybe_monitor_consumer(NoWait, ActualConsumerTag, State1)}; + case NoWait of + true -> monitor_consumer(ActualConsumerTag, State1); + false -> State1 + end}; {{error, exclusive_consume_unavailable}, _Q} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -1085,29 +1088,24 @@ handle_method(_MethodRecord, _Content, _State) -> %%---------------------------------------------------------------------------- -maybe_monitor_consumer(true, ConsumerTag, State) -> - monitor_consumer(ConsumerTag, State); -maybe_monitor_consumer(false, _ConsumerTag, State) -> - State. - monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, consumer_monitors = ConsumerMonitors, capabilities = Capabilities}) -> - case {dict:find(ConsumerTag, ConsumerMapping), - rabbit_misc:table_lookup( - Capabilities, <<"consumer_death_notification">>)} of - {{ok, {#amqqueue{pid = QPid} = Q, undefined}}, {bool, true}} -> + {#amqqueue{pid = QPid} = Q, undefined} = dict:fetch(ConsumerTag, + ConsumerMapping), + case rabbit_misc:table_lookup( + Capabilities, <<"consumer_cancel_notify">>) of + {bool, true} -> MRef = erlang:monitor(process, QPid), State#ch{consumer_mapping = dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), consumer_monitors = dict:store(MRef, ConsumerTag, ConsumerMonitors)}; _ -> - %% either already received the cancel or incapable client State end. 
-handle_non_consumer_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> +handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); none -> [] diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index c5d6ecc4..aa7d2775 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -163,10 +163,10 @@ server_properties(Protocol) -> NormalizedConfigServerProps). server_capabilities(rabbit_framing_amqp_0_9_1) -> - [{<<"publisher_confirms">>, bool, true}, - {<<"exchange_exchange_bindings">>, bool, true}, - {<<"basic.nack">>, bool, true}, - {<<"consumer_death_notification">>, bool, true}]; + [{<<"publisher_confirms">>, bool, true}, + {<<"exchange_exchange_bindings">>, bool, true}, + {<<"basic.nack">>, bool, true}, + {<<"consumer_cancel_notify">>, bool, true}]; server_capabilities(_) -> []. -- cgit v1.2.1 From bb9d2725ae586f40a6868b6af7ba4980e2ae3725 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Feb 2011 18:15:44 +0000 Subject: cosmetic and code movement --- src/rabbit_channel.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 28f3673d..fe6522fe 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,10 +298,8 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, State = #ch{consumer_monitors = ConsumerMonitors}) -> noreply( case dict:find(MRef, ConsumerMonitors) of - error -> - handle_queue_down(QPid, Reason, State); - {ok, ConsumerTag} -> - handle_consumer_down(MRef, ConsumerTag, State) + error -> handle_queue_down(QPid, Reason, State); + {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) end). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -1091,11 +1089,11 @@ handle_method(_MethodRecord, _Content, _State) -> monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, consumer_monitors = ConsumerMonitors, capabilities = Capabilities}) -> - {#amqqueue{pid = QPid} = Q, undefined} = dict:fetch(ConsumerTag, - ConsumerMapping), case rabbit_misc:table_lookup( Capabilities, <<"consumer_cancel_notify">>) of {bool, true} -> + {#amqqueue{pid = QPid} = Q, undefined} = + dict:fetch(ConsumerTag, ConsumerMapping), MRef = erlang:monitor(process, QPid), State#ch{consumer_mapping = dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping), -- cgit v1.2.1 From a25d080a27495b7306a282086e3e2c1ccb7d86be Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 11:15:28 +0000 Subject: Make sure logging is working if we're about to actually do something. 
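The reasoning, in miniature: die/2 reports through error_logger before halting,
so logging must already be set up -- which is what the rabbit:prepare() calls
added below ensure -- or the explanation never reaches the log files. A minimal
sketch (maybe_die_with_logs/2 is an invented name; die/2 and rabbit:prepare/0
appear elsewhere in this series):

    %% Sketch only: shows why rabbit:prepare() is called before dying.
    maybe_die_with_logs(Msg, Args) ->
        rabbit:prepare(),                  %% make sure logging is working first
        error_logger:error_msg(Msg, Args), %% now the message actually gets logged
        halt(1).
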
--- src/rabbit_upgrade.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9f33fd03..dd19de19 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -107,6 +107,7 @@ maybe_upgrade_mnesia() -> KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> + rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " @@ -116,6 +117,7 @@ maybe_upgrade_mnesia() -> [] -> ok; Upgrades -> + rabbit:prepare(), %% Ensure we have logs for this case upgrade_mode(AllNodes, KnownDiscNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(KnownDiscNodes) -- cgit v1.2.1 From bd6e51846b5fbbb6d407f0f1482b054563e1cecc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 12:50:49 +0000 Subject: Don't look at the cluster config, it is not trustworthy (for what we want). --- src/rabbit_mnesia.erl | 2 +- src/rabbit_upgrade.erl | 33 +++++++++++++++++++-------------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 97c4d11e..68654e46 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -18,7 +18,7 @@ -module(rabbit_mnesia). -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, - cluster/1, force_cluster/1, reset/0, force_reset/0, + cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/2, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dd19de19..f1f0d6d3 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -104,7 +104,6 @@ maybe_upgrade_mnesia() -> AllNodes = rabbit_mnesia:all_clustered_nodes(), - KnownDiscNodes = rabbit_mnesia:read_cluster_nodes_config(), case upgrades_required(mnesia) of version_not_available -> rabbit:prepare(), %% Ensure we have logs for this @@ -118,18 +117,18 @@ maybe_upgrade_mnesia() -> ok; Upgrades -> rabbit:prepare(), %% Ensure we have logs for this - case upgrade_mode(AllNodes, KnownDiscNodes) of + case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); - secondary -> secondary_upgrade(KnownDiscNodes) + secondary -> secondary_upgrade(AllNodes) end end, ok = rabbit_mnesia:delete_previous_run_disc_nodes(). -upgrade_mode(AllNodes, KnownDiscNodes) -> +upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(KnownDiscNodes), AfterUs} of + case {am_i_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -170,13 +169,11 @@ upgrade_mode(AllNodes, KnownDiscNodes) -> end end. -am_i_disc_node(KnownDiscNodes) -> - %% The cluster config does not list all disc nodes, but it will list us - %% if we're one. - case KnownDiscNodes of - [] -> true; - DiscNodes -> lists:member(node(), DiscNodes) - end. +am_i_disc_node() -> + %% This is pretty ugly but we can't start Mnesia and ask it (will hang), + %% we can't look at the config file (may not include us even if we're a + %% disc node). + filelib:is_regular(rabbit_mnesia:dir() ++ "/rabbit_durable_exchange.DCD"). 
die(Msg, Args) -> %% We don't throw or exit here since that gets thrown @@ -207,10 +204,18 @@ primary_upgrade(Upgrades, Nodes) -> force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. -secondary_upgrade(KnownDiscNodes) -> +secondary_upgrade(AllNodes) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), - ok = rabbit_mnesia:create_cluster_nodes_config(KnownDiscNodes), + %% Note that we cluster with all nodes, rather than all disc nodes + %% (as we can't know all disc nodes at this point). This is safe as + %% we're not writing the cluster config, just setting up Mnesia. + ClusterNodes = case am_i_disc_node() of + true -> AllNodes; + false -> AllNodes -- [node()] + end, + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + rabbit_mnesia:init_db(ClusterNodes, true), write_version(mnesia), ok. -- cgit v1.2.1 From 5b70262f2421af39e76b29b57fef44375ea44c9b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Feb 2011 14:41:24 +0000 Subject: Revert d3fd719c5287 (Remove should_offer/1). --- include/rabbit_auth_mechanism_spec.hrl | 1 + src/rabbit_auth_mechanism.erl | 4 ++++ src/rabbit_auth_mechanism_amqplain.erl | 5 ++++- src/rabbit_auth_mechanism_cr_demo.erl | 5 ++++- src/rabbit_auth_mechanism_plain.erl | 5 ++++- src/rabbit_reader.erl | 18 +++++++++--------- 6 files changed, 26 insertions(+), 12 deletions(-) diff --git a/include/rabbit_auth_mechanism_spec.hrl b/include/rabbit_auth_mechanism_spec.hrl index 49614d5f..614a3eed 100644 --- a/include/rabbit_auth_mechanism_spec.hrl +++ b/include/rabbit_auth_mechanism_spec.hrl @@ -17,6 +17,7 @@ -ifdef(use_specs). -spec(description/0 :: () -> [{atom(), any()}]). +-spec(should_offer/1 :: (rabbit_net:socket()) -> boolean()). -spec(init/1 :: (rabbit_net:socket()) -> any()). -spec(handle_response/2 :: (binary(), any()) -> {'ok', rabbit_types:user()} | diff --git a/src/rabbit_auth_mechanism.erl b/src/rabbit_auth_mechanism.erl index 1d14f9f0..897199ee 100644 --- a/src/rabbit_auth_mechanism.erl +++ b/src/rabbit_auth_mechanism.erl @@ -23,6 +23,10 @@ behaviour_info(callbacks) -> %% A description. {description, 0}, + %% If this mechanism is enabled, should it be offered for a given socket? + %% (primarily so EXTERNAL can be SSL-only) + {should_offer, 1}, + %% Called before authentication starts. Should create a state %% object to be passed through all the stages of authentication. {init, 1}, diff --git a/src/rabbit_auth_mechanism_amqplain.erl b/src/rabbit_auth_mechanism_amqplain.erl index 5e422eee..2168495d 100644 --- a/src/rabbit_auth_mechanism_amqplain.erl +++ b/src/rabbit_auth_mechanism_amqplain.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). -include("rabbit_auth_mechanism_spec.hrl"). @@ -38,6 +38,9 @@ description() -> [{name, <<"AMQPLAIN">>}, {description, <<"QPid AMQPLAIN mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> []. diff --git a/src/rabbit_auth_mechanism_cr_demo.erl b/src/rabbit_auth_mechanism_cr_demo.erl index 7fd20f8b..77aa34ea 100644 --- a/src/rabbit_auth_mechanism_cr_demo.erl +++ b/src/rabbit_auth_mechanism_cr_demo.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). -include("rabbit_auth_mechanism_spec.hrl"). 
@@ -43,6 +43,9 @@ description() -> {description, <<"RabbitMQ Demo challenge-response authentication " "mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> #state{}. diff --git a/src/rabbit_auth_mechanism_plain.erl b/src/rabbit_auth_mechanism_plain.erl index 1ca07018..e2f9bff9 100644 --- a/src/rabbit_auth_mechanism_plain.erl +++ b/src/rabbit_auth_mechanism_plain.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_auth_mechanism). --export([description/0, init/1, handle_response/2]). +-export([description/0, should_offer/1, init/1, handle_response/2]). -include("rabbit_auth_mechanism_spec.hrl"). @@ -41,6 +41,9 @@ description() -> [{name, <<"PLAIN">>}, {description, <<"SASL PLAIN authentication mechanism">>}]. +should_offer(_Sock) -> + true. + init(_Sock) -> []. diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 3908b646..29321c60 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -564,7 +564,7 @@ start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, version_major = ProtocolMajor, version_minor = ProtocolMinor, server_properties = server_properties(Protocol), - mechanisms = auth_mechanisms_binary(), + mechanisms = auth_mechanisms_binary(Sock), locales = <<"en_US">> }, ok = send_on_channel0(Sock, Start, Protocol), switch_callback(State#v1{connection = Connection#connection{ @@ -616,7 +616,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism, State0 = #v1{connection_state = starting, connection = Connection, sock = Sock}) -> - AuthMechanism = auth_mechanism_to_module(Mechanism), + AuthMechanism = auth_mechanism_to_module(Mechanism, Sock), Capabilities = case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of {table, Capabilities1} -> Capabilities1; @@ -709,14 +709,14 @@ handle_method0(_Method, #v1{connection_state = S}) -> send_on_channel0(Sock, Method, Protocol) -> ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol). -auth_mechanism_to_module(TypeBin) -> +auth_mechanism_to_module(TypeBin, Sock) -> case rabbit_registry:binary_to_type(TypeBin) of {error, not_found} -> rabbit_misc:protocol_error( command_invalid, "unknown authentication mechanism '~s'", [TypeBin]); T -> - case {lists:member(T, auth_mechanisms()), + case {lists:member(T, auth_mechanisms(Sock)), rabbit_registry:lookup_module(auth_mechanism, T)} of {true, {ok, Module}} -> Module; @@ -727,15 +727,15 @@ auth_mechanism_to_module(TypeBin) -> end end. -auth_mechanisms() -> +auth_mechanisms(Sock) -> {ok, Configured} = application:get_env(auth_mechanisms), - [Name || {Name, _Module} <- rabbit_registry:lookup_all(auth_mechanism), - lists:member(Name, Configured)]. + [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism), + Module:should_offer(Sock), lists:member(Name, Configured)]. -auth_mechanisms_binary() -> +auth_mechanisms_binary(Sock) -> list_to_binary( string:join( - [atom_to_list(A) || A <- auth_mechanisms()], " ")). + [atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). 
auth_phase(Response, State = #v1{auth_mechanism = AuthMechanism, -- cgit v1.2.1 From 102eb1221e34274c2fa54595d3c2fd258645f410 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 22 Feb 2011 14:43:08 +0000 Subject: Sender-specified destinations updates build on R12B3 reduce mnesia lookups --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_basic.erl | 31 +++++++++++++------------- src/rabbit_exchange_type_direct.erl | 3 +-- src/rabbit_exchange_type_fanout.erl | 2 +- src/rabbit_msg_file.erl | 24 ++++++++++---------- src/rabbit_msg_store.erl | 41 +++++++++++++++++++++++++---------- src/rabbit_router.erl | 17 ++++++++++++--- src/rabbit_variable_queue.erl | 32 +++------------------------ 8 files changed, 78 insertions(+), 74 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 17cdedc2..4889abff 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec(store_names/0 :: () -> [atom()]). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 503f01bc..376a303e 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -31,7 +31,6 @@ -type(publish_result() :: ({ok, rabbit_router:routing_result(), [pid()]} | rabbit_types:error('not_found'))). --type(msg_or_error() :: {'ok', rabbit_types:message()} | {'error', any()}). -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). @@ -41,10 +40,11 @@ rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> msg_or_error()). + properties_input(), binary()) -> rabbit_types:message()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> msg_or_error()). + rabbit_types:decoded_content()) -> {'ok', rabbit_types:message()} | + {'error', any()}). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -98,17 +98,19 @@ from_content(Content) -> {Props, list_to_binary(lists:reverse(FragmentsRev))}. %% This breaks the spec rule forbidding message modification +strip_header(#content{properties = #'P_basic'{headers = undefined}} + = DecodedContent, _Key) -> + DecodedContent; strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} - = DecodedContent, Key) when Headers =/= undefined -> - case lists:keyfind(Key, 1, Headers) of - false -> DecodedContent; - Found -> Headers0 = lists:delete(Found, Headers), - rabbit_binary_generator:clear_encoded_content( - DecodedContent#content{ - properties = Props#'P_basic'{headers = Headers0}}) - end; -strip_header(DecodedContent, _Key) -> - DecodedContent. + = DecodedContent, Key) -> + case lists:keysearch(Key, 1, Headers) of + false -> DecodedContent; + {value, Found} -> Headers0 = lists:delete(Found, Headers), + rabbit_binary_generator:clear_encoded_content( + DecodedContent#content{ + properties = Props#'P_basic'{ + headers = Headers0}}) + end. message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> @@ -170,7 +172,7 @@ is_message_persistent(#content{properties = #'P_basic'{ 1 -> false; 2 -> true; undefined -> false; - _ -> false + Other -> throw({error, {delivery_mode_unknown, Other}}) end. 
% Extract CC routes from headers @@ -185,4 +187,3 @@ header_routes(HeadersTable) -> Type, binary_to_list(HeaderKey)}}) end || HeaderKey <- ?ROUTING_HEADERS]). - diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 82776c4a..349c2f6e 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -37,8 +37,7 @@ description() -> route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> - lists:append([rabbit_router:match_routing_key(Name, RKey) || - RKey <- Routes]). + rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 382fb627..bc5293c8 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -36,7 +36,7 @@ description() -> {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. route(#exchange{name = Name}, _Delivery) -> - rabbit_router:match_routing_key(Name, '_'). + rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. create(_Tx, _X) -> ok. diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 81f2f07e..55e6ac47 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -80,28 +80,28 @@ read(FileHdl, TotalSize) -> end. scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 -> - scan(FileHdl, FileSize, <<>>, 0, Acc, 0, Fun). + scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc). -scan(_FileHdl, FileSize, _Data, FileSize, Acc, ScanOffset, _Fun) -> +scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) -> {ok, Acc, ScanOffset}; -scan(FileHdl, FileSize, Data, ReadOffset, Acc, ScanOffset, Fun) -> +scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) -> Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]), case file_handle_cache:read(FileHdl, Read) of {ok, Data1} -> {Data2, Acc1, ScanOffset1} = - scanner(<>, Acc, ScanOffset, Fun), + scanner(<>, ScanOffset, Fun, Acc), ReadOffset1 = ReadOffset + size(Data1), - scan(FileHdl, FileSize, Data2, ReadOffset1, Acc1, ScanOffset1, Fun); + scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1); _KO -> {ok, Acc, ScanOffset} end. -scanner(<<>>, Acc, Offset, _Fun) -> +scanner(<<>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; -scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Acc, Offset, _Fun) -> +scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> {<<>>, Acc, Offset}; %% Nothing to do other than stop. scanner(<>, Acc, Offset, Fun) -> + WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) -> TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, case WriteMarker of ?WRITE_OK_MARKER -> @@ -113,10 +113,10 @@ scanner(<> = <>, <> = <>, - scanner(Rest, Fun({Guid, TotalSize, Offset, Msg}, Acc), - Offset + TotalSize, Fun); + scanner(Rest, Offset + TotalSize, Fun, + Fun({Guid, TotalSize, Offset, Msg}, Acc)); _ -> - scanner(Rest, Acc, Offset + TotalSize, Fun) + scanner(Rest, Offset + TotalSize, Fun, Acc) end; -scanner(Data, Acc, Offset, _Fun) -> +scanner(Data, Offset, _Fun, Acc) -> {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a2f6d7e2..d798c4f7 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,7 +26,7 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal --export([transform_dir/3, force_recovery/2]). %% upgrade +-export([multiple_routing_keys/0]). 
%% upgrade -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). @@ -106,6 +106,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -export_type([gc_state/0, file_num/0]). @@ -164,9 +166,7 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). --spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). --spec(transform_dir/3 :: (file:filename(), server(), - fun ((binary()) -> ({'ok', msg()} | {error, any()}))) -> 'ok'). +-spec(multiple_routing_keys/0 :: () -> 'ok'). -endif. @@ -1968,6 +1968,25 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. +%%---------------------------------------------------------------------------- +%% upgrade +%%---------------------------------------------------------------------------- + +multiple_routing_keys() -> + [transform_store( + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end, Store) || Store <- rabbit_variable_queue:store_names()], + ok. + +%% Assumes message store is not running +transform_store(TransformFun, Store) -> + force_recovery(rabbit_mnesia:dir(), Store), + transform_dir(rabbit_mnesia:dir(), Store, TransformFun). + force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), @@ -1975,10 +1994,10 @@ force_recovery(BaseDir, Store) -> File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], ok. -for_each_file(D, Fun, Files) -> +foreach_file(D, Fun, Files) -> [Fun(filename:join(D, File)) || File <- Files]. -for_each_file(D1, D2, Fun, Files) -> +foreach_file(D1, D2, Fun, Files) -> [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> @@ -1988,11 +2007,11 @@ transform_dir(BaseDir, Store, TransformFun) -> case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - for_each_file(Dir, TmpDir, TransformFile, OldFileList), - for_each_file(Dir, fun file:delete/1, OldFileList), + foreach_file(Dir, TmpDir, TransformFile, OldFileList), + foreach_file(Dir, fun file:delete/1, OldFileList), NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - for_each_file(TmpDir, Dir, fun file:copy/2, NewFileList), - for_each_file(TmpDir, fun file:delete/1, NewFileList), + foreach_file(TmpDir, Dir, fun file:copy/2, NewFileList), + foreach_file(TmpDir, fun file:delete/1, NewFileList), ok = file:del_dir(TmpDir) end. @@ -2007,7 +2026,7 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, Size, fun({Guid, _Size, _Offset, BinMsg}, ok) -> - case TransformFun(BinMsg) of + case TransformFun(binary_to_term(BinMsg)) of {ok, MsgNew} -> {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), ok; diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 692d2473..53e707f4 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -37,7 +37,8 @@ fun ((rabbit_types:binding()) -> boolean())) -> match_result()). 
-spec(match_routing_key/2 :: (rabbit_types:binding_source(), - routing_key() | '_') -> match_result()). + [routing_key()] | ['_']) -> + match_result()). -endif. @@ -82,12 +83,22 @@ match_bindings(SrcName, Match) -> Match(Binding)]), mnesia:async_dirty(fun qlc:e/1, [Query]). -match_routing_key(SrcName, RoutingKey) -> +match_routing_key(SrcName, [RoutingKey]) -> MatchHead = #route{binding = #binding{source = SrcName, destination = '$1', key = RoutingKey, _ = '_'}}, - mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]). + mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]); +match_routing_key(SrcName, [_|_] = RoutingKeys) -> + Condition = list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || + RKey <- RoutingKeys]]), + MatchHead = #route{binding = #binding{source = SrcName, + destination = '$1', + key = '$2', + _ = '_'}}, + mnesia:dirty_select(rabbit_route, [{MatchHead, [Condition], ['$1']}]). + + %%-------------------------------------------------------------------- diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index b0781f8f..4eb9c3b8 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, multiple_routing_keys/0]). + status/1, store_names/0]). -export([start/1, stop/0]). @@ -294,8 +294,6 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). - -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). @@ -1804,29 +1802,5 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. -%%---------------------------------------------------------------------------- -%% Upgrading -%%---------------------------------------------------------------------------- - -multiple_routing_keys() -> - transform_storage( - fun (BinMsg) -> - case binary_to_term(BinMsg) of - {basic_message, ExchangeName, Routing_Key, Content, Guid, - Persistent} -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - _ -> - {error, corrupt_message} - end - end), - ok. - -%% Assumes message store is not running -transform_storage(TransformFun) -> - transform_store(?PERSISTENT_MSG_STORE, TransformFun), - transform_store(?TRANSIENT_MSG_STORE, TransformFun). - -transform_store(Store, TransformFun) -> - rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), - rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). +store_names() -> + [?PERSISTENT_MSG_STORE, ?TRANSIENT_MSG_STORE]. 
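As a usage note on the sender-specified destinations change above: a message
carrying extra routing keys is now matched in a single mnesia select rather
than once per key. A minimal sketch, assuming the <<"CC">> header carries an
AMQP array of longstr keys which message/4 folds into the message's
routing_keys (as the header_routes/1 helper referenced above suggests), and
that rabbit.hrl and rabbit_framing.hrl are included; cc_example/1 and the
payload are invented for illustration:

    %% Sketch only: builds a message the patched direct exchange will route
    %% on three keys at once.
    cc_example(ExchangeName) ->
        Headers = [{<<"CC">>, array, [{longstr, <<"b">>}, {longstr, <<"c">>}]}],
        rabbit_basic:message(ExchangeName, <<"a">>,
                             #'P_basic'{headers = Headers}, <<"payload">>).

With the rabbit_exchange_type_direct change above, routing such a message then
reduces to one call:
rabbit_router:match_routing_key(ExchangeName, [<<"a">>, <<"b">>, <<"c">>]).
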
-- cgit v1.2.1 From cbcafda448298d83067c1c66536df1f49f52b7de Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 15:55:37 +0000 Subject: better error reporting for failed table integrity checks --- src/rabbit_mnesia.erl | 55 +++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a30f7996..42f7e3b2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -264,45 +264,48 @@ ensure_schema_integrity() -> check_schema_integrity() -> Tables = mnesia:system_info(tables), - case [Error || {Tab, TabDef} <- table_definitions(), - case lists:member(Tab, Tables) of - false -> - Error = {table_missing, Tab}, - true; - true -> - {_, ExpAttrs} = proplists:lookup(attributes, TabDef), - Attrs = mnesia:table_info(Tab, attributes), - Error = {table_attributes_mismatch, Tab, - ExpAttrs, Attrs}, - Attrs /= ExpAttrs - end] of - [] -> check_table_integrity(); - Errors -> {error, Errors} + case check_tables(fun (Tab, TabDef) -> + case lists:member(Tab, Tables) of + false -> {error, {table_missing, Tab}}; + true -> check_table_attributes(Tab, TabDef) + end + end) of + ok -> ok = wait_for_tables(), + check_tables(fun check_table_integrity/2); + Other -> Other end. -check_table_integrity() -> - ok = wait_for_tables(), - case lists:all(fun ({Tab, TabDef}) -> - {_, Match} = proplists:lookup(match, TabDef), - read_test_table(Tab, Match) - end, table_definitions()) of - true -> ok; - false -> {error, invalid_table_content} +check_table_attributes(Tab, TabDef) -> + {_, ExpAttrs} = proplists:lookup(attributes, TabDef), + case mnesia:table_info(Tab, attributes) of + ExpAttrs -> ok; + Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}} end. -read_test_table(Tab, Match) -> +check_table_integrity(Tab, TabDef) -> + {_, Match} = proplists:lookup(match, TabDef), case mnesia:dirty_first(Tab) of '$end_of_table' -> - true; + ok; Key -> ObjList = mnesia:dirty_read(Tab, Key), MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]), case ets:match_spec_run(ObjList, MatchComp) of - ObjList -> true; - _ -> false + ObjList -> ok; + _ -> {error, {table_content_invalid, Tab, Match, ObjList}} end end. +check_tables(Fun) -> + case [Error || {Tab, TabDef} <- table_definitions(), + case Fun(Tab, TabDef) of + ok -> Error = none, false; + {error, Error} -> true + end] of + [] -> ok; + Errors -> {error, Errors} + end. + %% The cluster node config file contains some or all of the disk nodes %% that are members of the cluster this node is / should be a part of. %% -- cgit v1.2.1 From 102c4420102346c0a66ff992eacb23630bd2d3f5 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Feb 2011 15:57:47 +0000 Subject: better name --- src/rabbit_mnesia.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 42f7e3b2..5e990d61 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -271,7 +271,7 @@ check_schema_integrity() -> end end) of ok -> ok = wait_for_tables(), - check_tables(fun check_table_integrity/2); + check_tables(fun check_table_content/2); Other -> Other end. @@ -282,7 +282,7 @@ check_table_attributes(Tab, TabDef) -> Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}} end. 
-check_table_integrity(Tab, TabDef) -> +check_table_content(Tab, TabDef) -> {_, Match} = proplists:lookup(match, TabDef), case mnesia:dirty_first(Tab) of '$end_of_table' -> -- cgit v1.2.1 From 9d3eb1f0bd42cc23d3ad2474721d0a0a4b4fcf8e Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 22 Feb 2011 16:57:39 +0000 Subject: Revert re-arrangement of upgrade steps --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_basic.erl | 4 ++-- src/rabbit_msg_store.erl | 33 ++++++--------------------------- src/rabbit_variable_queue.erl | 29 ++++++++++++++++++++++++++--- 4 files changed, 35 insertions(+), 33 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 4889abff..17cdedc2 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,4 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(store_names/0 :: () -> [atom()]). +-spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 376a303e..f29cc805 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -43,8 +43,8 @@ properties_input(), binary()) -> rabbit_types:message()). -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), - rabbit_types:decoded_content()) -> {'ok', rabbit_types:message()} | - {'error', any()}). + rabbit_types:decoded_content()) -> + rabbit_types:ok_or_error2(rabbit_types:message() | any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index d798c4f7..ef0e2e0d 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -26,7 +26,7 @@ -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal --export([multiple_routing_keys/0]). %% upgrade +-export([transform_dir/3, force_recovery/2]). %% upgrade -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). @@ -34,9 +34,8 @@ %%---------------------------------------------------------------------------- -include("rabbit_msg_store.hrl"). --include_lib("kernel/include/file.hrl"). --define(SYNC_INTERVAL, 25). %% milliseconds +-define(SYNC_INTERVAL, 5). %% milliseconds -define(CLEAN_FILENAME, "clean.dot"). -define(FILE_SUMMARY_FILENAME, "file_summary.ets"). -define(TRANSFORM_TMP, "transform_tmp"). @@ -106,8 +105,6 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). - -ifdef(use_specs). -export_type([gc_state/0, file_num/0]). @@ -166,7 +163,9 @@ -spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) -> deletion_thunk()). -spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()). --spec(multiple_routing_keys/0 :: () -> 'ok'). +-spec(force_recovery/2 :: (file:filename(), server()) -> 'ok'). +-spec(transform_dir/3 :: (file:filename(), server(), + fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'). -endif. @@ -1968,25 +1967,6 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, {destination, Destination}]} end. 
-%%---------------------------------------------------------------------------- -%% upgrade -%%---------------------------------------------------------------------------- - -multiple_routing_keys() -> - [transform_store( - fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - (_) -> {error, corrupt_message} - end, Store) || Store <- rabbit_variable_queue:store_names()], - ok. - -%% Assumes message store is not running -transform_store(TransformFun, Store) -> - force_recovery(rabbit_mnesia:dir(), Store), - transform_dir(rabbit_mnesia:dir(), Store, TransformFun). - force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), @@ -2017,14 +1997,13 @@ transform_dir(BaseDir, Store, TransformFun) -> transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_misc:ensure_parent_dirs_exist(FileNew), - {ok, #file_info{size=Size}} = file:read_file_info(FileOld), {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]), {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( - RefOld, Size, + RefOld, filelib:file_size(FileOld), fun({Guid, _Size, _Offset, BinMsg}, ok) -> case TransformFun(binary_to_term(BinMsg)) of {ok, MsgNew} -> diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 4eb9c3b8..3ef76d15 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, store_names/0]). + status/1, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -294,6 +294,8 @@ %%---------------------------------------------------------------------------- +-rabbit_upgrade({multiple_routing_keys, []}). + -ifdef(use_specs). -type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}). @@ -1802,5 +1804,26 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> Generator, Limit, Qa, Count + 1, RamIndexCount1, IndexState1) end. -store_names() -> - [?PERSISTENT_MSG_STORE, ?TRANSIENT_MSG_STORE]. +%%---------------------------------------------------------------------------- +%% Upgrading +%%---------------------------------------------------------------------------- + +multiple_routing_keys() -> + transform_storage( + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end), + ok. + + +%% Assumes message store is not running +transform_storage(TransformFun) -> + transform_store(?PERSISTENT_MSG_STORE, TransformFun), + transform_store(?TRANSIENT_MSG_STORE, TransformFun). + +transform_store(Store, TransformFun) -> + rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store), + rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun). 
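The restored multiple_routing_keys/0 step above rewrites every persisted basic_message so that its single routing key becomes a one-element list. A minimal sketch of just that per-term rewrite, with a bare tuple standing in for the real #basic_message{} record and everything else (store recovery, directory scanning) left out:

%% Sketch of the per-message rewrite performed by the upgrade; the tuple
%% shapes mirror the transform fun above, the example values are made up.
-module(mrk_transform_sketch).
-export([transform/1, demo/0]).

transform({basic_message, ExchangeName, RoutingKey, Content, Guid, Persistent}) ->
    {ok, {basic_message, ExchangeName, [RoutingKey], Content, Guid, Persistent}};
transform(_) ->
    {error, corrupt_message}.

demo() ->
    Old = {basic_message, {resource, <<"/">>, exchange, <<"amq.direct">>},
           <<"rk">>, no_content, <<"guid">>, true},
    {ok, {basic_message, _, [<<"rk">>], _, _, _}} = transform(Old),
    {error, corrupt_message} = transform(garbage),
    ok.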
-- cgit v1.2.1 From fd53e724c289b17eca48aa2252376231be51eb41 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 17:22:37 +0000 Subject: Added functional tests --- src/gm_soak_test.erl | 130 ++++++++++++++++++++++++++++++++++++++++ src/gm_test.erl | 126 --------------------------------------- src/gm_tests.erl | 165 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 295 insertions(+), 126 deletions(-) create mode 100644 src/gm_soak_test.erl delete mode 100644 src/gm_test.erl create mode 100644 src/gm_tests.erl diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl new file mode 100644 index 00000000..1f8832a6 --- /dev/null +++ b/src/gm_soak_test.erl @@ -0,0 +1,130 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_soak_test). + +-export([test/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +%% --------------------------------------------------------------------------- +%% Soak test +%% --------------------------------------------------------------------------- + +get_state() -> + get(state). + +with_state(Fun) -> + put(state, Fun(get_state())). + +inc() -> + case 1 + get(count) of + 100000 -> Now = os:timestamp(), + Start = put(ts, Now), + Diff = timer:now_diff(Now, Start), + Rate = 100000 / (Diff / 1000000), + io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), + put(count, 0); + N -> put(count, N) + end. + +joined([], Members) -> + io:format("Joined ~p (~p members)~n", [self(), length(Members)]), + put(state, dict:from_list([{Member, empty} || Member <- Members])), + put(count, 0), + put(ts, os:timestamp()), + ok. + +members_changed([], Births, Deaths) -> + with_state( + fun (State) -> + State1 = + lists:foldl( + fun (Born, StateN) -> + false = dict:is_key(Born, StateN), + dict:store(Born, empty, StateN) + end, State, Births), + lists:foldl( + fun (Died, StateN) -> + true = dict:is_key(Died, StateN), + dict:store(Died, died, StateN) + end, State1, Deaths) + end), + ok. + +handle_msg([], From, {test_msg, Num}) -> + inc(), + with_state( + fun (State) -> + ok = case dict:find(From, State) of + {ok, died} -> + exit({{from, From}, + {received_posthumous_delivery, Num}}); + {ok, empty} -> ok; + {ok, Num} -> ok; + {ok, Num1} when Num < Num1 -> + exit({{from, From}, + {duplicate_delivery_of, Num1}, + {expecting, Num}}); + {ok, Num1} -> + exit({{from, From}, + {missing_delivery_of, Num}, + {received_early, Num1}}); + error -> + exit({{from, From}, + {received_premature_delivery, Num}}) + end, + dict:store(From, Num + 1, State) + end), + ok. + +terminate([], Reason) -> + io:format("Left ~p (~p)~n", [self(), Reason]), + ok. 
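%% (Illustrative aside, not part of the patch: the handle_msg/3 clause above
%% enforces per-sender ordering by tracking the next expected number for each
%% member. The standalone sketch below restates that check as a pure function
%% over a dict, returning atoms where the soak test exits; the member names
%% and numbers are invented for the example.)
-module(gm_seq_check_sketch).
-export([check/3, demo/0]).

check(From, Num, State) ->
    case dict:find(From, State) of
        {ok, died}  -> {posthumous_delivery, State};
        {ok, empty} -> {ok, dict:store(From, Num + 1, State)};
        {ok, Num}   -> {ok, dict:store(From, Num + 1, State)};
        {ok, Expected} when Num < Expected -> {duplicate_delivery, State};
        {ok, _Expected}                    -> {missing_delivery, State};
        error                              -> {premature_delivery, State}
    end.

demo() ->
    S0 = dict:store(member_a, empty, dict:new()),
    {ok, S1} = check(member_a, 7, S0),  %% first message fixes the counter
    {ok, S2} = check(member_a, 8, S1),  %% the expected next number is accepted
    {duplicate_delivery, _} = check(member_a, 8, S2),
    {premature_delivery, _} = check(member_b, 1, S2),
    ok.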
+ +spawn_member() -> + spawn_link( + fun () -> + random:seed(now()), + %% start up delay of no more than 10 seconds + timer:sleep(random:uniform(10000)), + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), + Start = random:uniform(10000), + send_loop(Pid, Start, Start + random:uniform(10000)), + gm:leave(Pid), + spawn_more() + end). + +spawn_more() -> + [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. + +send_loop(_Pid, Target, Target) -> + ok; +send_loop(Pid, Count, Target) when Target > Count -> + case random:uniform(3) of + 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); + _ -> gm:broadcast(Pid, {test_msg, Count}) + end, + timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms + send_loop(Pid, Count + 1, Target). + +test() -> + ok = gm:create_tables(), + spawn_member(), + spawn_member(). diff --git a/src/gm_test.erl b/src/gm_test.erl deleted file mode 100644 index e0a92a0c..00000000 --- a/src/gm_test.erl +++ /dev/null @@ -1,126 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. -%% - --module(gm_test). - --export([test/0]). --export([joined/2, members_changed/3, handle_msg/3, terminate/2]). - --behaviour(gm). - --include("gm_specs.hrl"). - -get_state() -> - get(state). - -with_state(Fun) -> - put(state, Fun(get_state())). - -inc() -> - case 1 + get(count) of - 100000 -> Now = os:timestamp(), - Start = put(ts, Now), - Diff = timer:now_diff(Now, Start), - Rate = 100000 / (Diff / 1000000), - io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), - put(count, 0); - N -> put(count, N) - end. - -joined([], Members) -> - io:format("Joined ~p (~p members)~n", [self(), length(Members)]), - put(state, dict:from_list([{Member, empty} || Member <- Members])), - put(count, 0), - put(ts, os:timestamp()), - ok. - -members_changed([], Births, Deaths) -> - with_state( - fun (State) -> - State1 = - lists:foldl( - fun (Born, StateN) -> - false = dict:is_key(Born, StateN), - dict:store(Born, empty, StateN) - end, State, Births), - lists:foldl( - fun (Died, StateN) -> - true = dict:is_key(Died, StateN), - dict:store(Died, died, StateN) - end, State1, Deaths) - end), - ok. - -handle_msg([], From, {test_msg, Num}) -> - inc(), - with_state( - fun (State) -> - ok = case dict:find(From, State) of - {ok, died} -> - exit({{from, From}, - {received_posthumous_delivery, Num}}); - {ok, empty} -> ok; - {ok, Num} -> ok; - {ok, Num1} when Num < Num1 -> - exit({{from, From}, - {duplicate_delivery_of, Num1}, - {expecting, Num}}); - {ok, Num1} -> - exit({{from, From}, - {missing_delivery_of, Num}, - {received_early, Num1}}); - error -> - exit({{from, From}, - {received_premature_delivery, Num}}) - end, - dict:store(From, Num + 1, State) - end), - ok. - -terminate([], Reason) -> - io:format("Left ~p (~p)~n", [self(), Reason]), - ok. 
- -spawn_member() -> - spawn_link( - fun () -> - random:seed(now()), - %% start up delay of no more than 10 seconds - timer:sleep(random:uniform(10000)), - {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), - Start = random:uniform(10000), - send_loop(Pid, Start, Start + random:uniform(10000)), - gm:leave(Pid), - spawn_more() - end). - -spawn_more() -> - [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. - -send_loop(_Pid, Target, Target) -> - ok; -send_loop(Pid, Count, Target) when Target > Count -> - case random:uniform(3) of - 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); - _ -> gm:broadcast(Pid, {test_msg, Count}) - end, - timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms - send_loop(Pid, Count + 1, Target). - -test() -> - ok = gm:create_tables(), - spawn_member(), - spawn_member(). diff --git a/src/gm_tests.erl b/src/gm_tests.erl new file mode 100644 index 00000000..38b3db2f --- /dev/null +++ b/src/gm_tests.erl @@ -0,0 +1,165 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_tests). + +-export([test_join_leave/0, + test_broadcast/0, + test_confirmed_broadcast/0, + test_member_death/0, + all_tests/0]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +joined(Pid, Members) -> + Pid ! {joined, self(), Members}, + ok. + +members_changed(Pid, Births, Deaths) -> + Pid ! {members_changed, self(), Births, Deaths}, + ok. + +handle_msg(Pid, From, Msg) -> + Pid ! {msg, self(), From, Msg}, + ok. + +terminate(Pid, Reason) -> + Pid ! {termination, self(), Reason}, + ok. + +%% --------------------------------------------------------------------------- +%% Functional tests +%% --------------------------------------------------------------------------- + +all_tests() -> + passed = test_join_leave(), + passed = test_broadcast(), + passed = test_confirmed_broadcast(), + passed = test_member_death(), + passed. + +test_join_leave() -> + with_two_members(fun (_Pid, _Pid2) -> passed end). + +test_broadcast() -> + test_broadcast(fun gm:broadcast/2). + +test_confirmed_broadcast() -> + test_broadcast(fun gm:confirmed_broadcast/2). + +test_member_death() -> + with_two_members( + fun (Pid, Pid2) -> + {ok, Pid3} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid3, [Pid, Pid2, Pid3], + timeout_joining_gm_group_3), + passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1), + passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2), + + unlink(Pid3), + exit(Pid3, kill), + + passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))( + Pid, Pid2), + + passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1), + passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2), + + passed + end). + +test_broadcast(Fun) -> + with_two_members(test_broadcast_fun(Fun)). 
+ +test_broadcast_fun(Fun) -> + fun (Pid, Pid2) -> + ok = Fun(Pid, magic_message), + passed = receive_or_throw({msg, Pid, Pid, magic_message}, + timeout_waiting_for_msg), + passed = receive_or_throw({msg, Pid2, Pid, magic_message}, + timeout_waiting_for_msg) + end. + +with_two_members(Fun) -> + ok = gm:create_tables(), + + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1), + + {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self()), + passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), + + passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), + + passed = Fun(Pid, Pid2), + + ok = gm:leave(Pid), + passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1), + passed = + receive_termination(Pid, normal, timeout_waiting_for_termination_1), + + ok = gm:leave(Pid2), + passed = + receive_termination(Pid2, normal, timeout_waiting_for_termination_2), + + receive X -> throw({unexpected_message, X}) + after 0 -> passed + end. + +receive_or_throw(Pattern, Error) -> + receive Pattern -> + passed + after 1000 -> + throw(Error) + end. + +receive_birth(From, Born, Error) -> + receive {members_changed, From, Birth, Death} -> + [Born] = Birth, + [] = Death, + passed + after 1000 -> + throw(Error) + end. + +receive_death(From, Died, Error) -> + receive {members_changed, From, Birth, Death} -> + [] = Birth, + [Died] = Death, + passed + after 1000 -> + throw(Error) + end. + +receive_joined(From, Members, Error) -> + Members1 = lists:usort(Members), + receive {joined, From, Members2} -> + Members1 = lists:usort(Members2), + passed + after 1000 -> + throw(Error) + end. + +receive_termination(From, Reason, Error) -> + receive {termination, From, Reason1} -> + Reason = Reason1, + passed + after 1000 -> + throw(Error) + end. -- cgit v1.2.1 From c3f44f9a82132f63ad9b1566874c054909c6733f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:54:12 +0000 Subject: Magic macroification --- src/gm_tests.erl | 53 +++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 38b3db2f..bb92bc4c 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -27,6 +27,14 @@ -include("gm_specs.hrl"). +-define(RECEIVE_AFTER(Body, Bool, Error), + receive Body -> + true = Bool, + passed + after 1000 -> + throw(Error) + end). + joined(Pid, Members) -> Pid ! {joined, self(), Members}, ok. @@ -123,43 +131,24 @@ with_two_members(Fun) -> end. receive_or_throw(Pattern, Error) -> - receive Pattern -> - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER(Pattern, true, Error). receive_birth(From, Born, Error) -> - receive {members_changed, From, Birth, Death} -> - [Born] = Birth, - [] = Death, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({members_changed, From, Birth, Death}, + ([Born] == Birth) andalso ([] == Death), + Error). receive_death(From, Died, Error) -> - receive {members_changed, From, Birth, Death} -> - [] = Birth, - [Died] = Death, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({members_changed, From, Birth, Death}, + ([] == Birth) andalso ([Died] == Death), + Error). receive_joined(From, Members, Error) -> - Members1 = lists:usort(Members), - receive {joined, From, Members2} -> - Members1 = lists:usort(Members2), - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({joined, From, Members2}, + lists:usort(Members) == lists:usort(Members2), + Error). 
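%% (Illustrative aside: the ?RECEIVE_AFTER macro introduced in this patch,
%% later renamed ?RECEIVE_OR_THROW, packages the selective-receive-with-timeout
%% idiom the helpers above share. A self-contained restatement, with a made-up
%% message and the same 1 second timeout:)
-module(receive_macro_sketch).
-export([demo/0]).

-define(RECEIVE_AFTER(Body, Bool, Error),
        receive Body ->
                true = Bool,
                passed
        after 1000 ->
                throw(Error)
        end).

demo() ->
    self() ! {msg, hello},
    passed = ?RECEIVE_AFTER({msg, M}, M == hello, timeout_waiting_for_msg),
    %% With nothing left in the mailbox the receive times out and the macro
    %% throws the supplied error term.
    timeout_empty_mailbox =
        (catch ?RECEIVE_AFTER({msg, _}, true, timeout_empty_mailbox)),
    ok.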
receive_termination(From, Reason, Error) -> - receive {termination, From, Reason1} -> - Reason = Reason1, - passed - after 1000 -> - throw(Error) - end. + ?RECEIVE_AFTER({termination, From, Reason1}, + Reason == Reason1, + Error). -- cgit v1.2.1 From 8a6cb10fd4817ebf92303e397f797c1a3de6ed57 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:55:24 +0000 Subject: consistency --- src/gm_tests.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index bb92bc4c..fd9a6487 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -144,8 +144,8 @@ receive_death(From, Died, Error) -> Error). receive_joined(From, Members, Error) -> - ?RECEIVE_AFTER({joined, From, Members2}, - lists:usort(Members) == lists:usort(Members2), + ?RECEIVE_AFTER({joined, From, Members1}, + lists:usort(Members) == lists:usort(Members1), Error). receive_termination(From, Reason, Error) -> -- cgit v1.2.1 From 5597c0f213da52331b090c1f2f954ccf155dd0cd Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Feb 2011 22:57:01 +0000 Subject: rename --- src/gm_tests.erl | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index fd9a6487..87244153 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -27,7 +27,7 @@ -include("gm_specs.hrl"). --define(RECEIVE_AFTER(Body, Bool, Error), +-define(RECEIVE_OR_THROW(Body, Bool, Error), receive Body -> true = Bool, passed @@ -131,24 +131,24 @@ with_two_members(Fun) -> end. receive_or_throw(Pattern, Error) -> - ?RECEIVE_AFTER(Pattern, true, Error). + ?RECEIVE_OR_THROW(Pattern, true, Error). receive_birth(From, Born, Error) -> - ?RECEIVE_AFTER({members_changed, From, Birth, Death}, - ([Born] == Birth) andalso ([] == Death), - Error). + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([Born] == Birth) andalso ([] == Death), + Error). receive_death(From, Died, Error) -> - ?RECEIVE_AFTER({members_changed, From, Birth, Death}, - ([] == Birth) andalso ([Died] == Death), - Error). + ?RECEIVE_OR_THROW({members_changed, From, Birth, Death}, + ([] == Birth) andalso ([Died] == Death), + Error). receive_joined(From, Members, Error) -> - ?RECEIVE_AFTER({joined, From, Members1}, - lists:usort(Members) == lists:usort(Members1), - Error). + ?RECEIVE_OR_THROW({joined, From, Members1}, + lists:usort(Members) == lists:usort(Members1), + Error). receive_termination(From, Reason, Error) -> - ?RECEIVE_AFTER({termination, From, Reason1}, - Reason == Reason1, - Error). + ?RECEIVE_OR_THROW({termination, From, Reason1}, + Reason == Reason1, + Error). -- cgit v1.2.1 From a74602a5813a6915f3be26719e84a637fea337f5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 12:52:55 +0000 Subject: Added test to assert receiving messages in the order they're sent. Other cosmetics --- src/gm_tests.erl | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 87244153..65e9cff0 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -20,6 +20,7 @@ test_broadcast/0, test_confirmed_broadcast/0, test_member_death/0, + test_receive_in_order/0, all_tests/0]). -export([joined/2, members_changed/3, handle_msg/3, terminate/2]). @@ -60,6 +61,7 @@ all_tests() -> passed = test_broadcast(), passed = test_confirmed_broadcast(), passed = test_member_death(), + passed = test_receive_in_order(), passed. 
test_join_leave() -> @@ -83,6 +85,8 @@ test_member_death() -> unlink(Pid3), exit(Pid3, kill), + %% Have to do some broadcasts to ensure that all members + %% find out about the death. passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))( Pid, Pid2), @@ -92,6 +96,23 @@ test_member_death() -> passed end). +test_receive_in_order() -> + with_two_members( + fun (Pid, Pid2) -> + Numbers = lists:seq(1,1000), + [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end + || N <- Numbers], + passed = receive_numbers( + Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers), + passed = receive_numbers( + Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers), + passed = receive_numbers( + Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers), + passed = receive_numbers( + Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers), + passed + end). + test_broadcast(Fun) -> with_two_members(test_broadcast_fun(Fun)). @@ -112,7 +133,6 @@ with_two_members(Fun) -> {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self()), passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2), - passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2), passed = Fun(Pid, Pid2), @@ -152,3 +172,11 @@ receive_termination(From, Reason, Error) -> ?RECEIVE_OR_THROW({termination, From, Reason1}, Reason == Reason1, Error). + +receive_numbers(_Pid, _Sender, _Error, []) -> + passed; +receive_numbers(Pid, Sender, Error, [N | Numbers]) -> + ?RECEIVE_OR_THROW({msg, Pid, Sender, M}, + M == N, + Error), + receive_numbers(Pid, Sender, Error, Numbers). -- cgit v1.2.1 From eccf06819029cc5c72b0d8b166dca929ba42e620 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 12:54:40 +0000 Subject: Wire in gm_tests to rabbit tests --- src/rabbit_tests.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 49b09508..644c4f96 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -34,6 +34,7 @@ test_content_prop_roundtrip(Datum, Binary) -> Binary = rabbit_binary_generator:encode_properties(Types, Values). %% assertion all_tests() -> + passed = gm_tests:all_tests(), application:set_env(rabbit, file_handles_high_watermark, 10, infinity), ok = file_handle_cache:set_limit(10), passed = test_file_handle_cache(), -- cgit v1.2.1 From 1ee22ba19d1cdfab15811b75d6a4b7a3020eb38d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Feb 2011 13:09:16 +0000 Subject: correction of specs --- include/gm_specs.hrl | 2 +- src/gm.erl | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl index 2109d15d..ee29706e 100644 --- a/include/gm_specs.hrl +++ b/include/gm_specs.hrl @@ -17,7 +17,7 @@ -ifdef(use_specs). -type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}). --type(args() :: [any()]). +-type(args() :: any()). -type(members() :: [pid()]). -spec(joined/2 :: (args(), members()) -> callback_result()). diff --git a/src/gm.erl b/src/gm.erl index 283b2431..b3fb7eca 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -53,13 +53,12 @@ %% to create the tables required. %% %% start_link/3 -%% Provide the group name, the callback module name, and a list of any -%% arguments you wish to be passed into the callback module's -%% functions. The joined/1 will be called when we have joined the -%% group, and the list of arguments will have appended to it a list of -%% the current members of the group. See the comments in -%% behaviour_info/1 below for further details of the callback -%% functions. 
+%% Provide the group name, the callback module name, and any arguments +%% you wish to be passed into the callback module's functions. The +%% joined/1 will be called when we have joined the group, and the list +%% of arguments will have appended to it a list of the current members +%% of the group. See the comments in behaviour_info/1 below for +%% further details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback @@ -421,7 +420,7 @@ -type(group_name() :: any()). -spec(create_tables/0 :: () -> 'ok'). --spec(start_link/3 :: (group_name(), atom(), [any()]) -> +-spec(start_link/3 :: (group_name(), atom(), any()) -> {'ok', pid()} | {'error', any()}). -spec(leave/1 :: (pid()) -> 'ok'). -spec(broadcast/2 :: (pid(), any()) -> 'ok'). -- cgit v1.2.1 From fff7752e4df43bdefecee6a9700b5d34df3097e5 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Feb 2011 13:40:15 +0000 Subject: Fixed incorrect binding pattern in rabbit_mnesia --- src/rabbit_mnesia.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 25767a55..93e20381 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -232,8 +232,8 @@ trie_edge_match() -> #trie_edge{exchange_name = exchange_name_match(), _='_'}. trie_binding_match() -> - #trie_edge{exchange_name = exchange_name_match(), - _='_'}. + #trie_binding{exchange_name = exchange_name_match(), + _='_'}. exchange_name_match() -> resource_match(exchange). queue_name_match() -> -- cgit v1.2.1 From d86469a2af5cc68da909a4698a0ee634f2e8aa8b Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Feb 2011 14:43:00 +0000 Subject: Removed table name intersection in wait_for_tables and cleaned up whitespace changes --- src/rabbit_mnesia.erl | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 93e20381..f2d23dad 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -388,8 +388,7 @@ init_db(ClusterNodes, Force) -> {[], true, [_]} -> %% True single disc node, attempt upgrade case rabbit_upgrade:maybe_upgrade() of - ok -> ok = wait_for_tables(), - ensure_schema_integrity(); + ok -> ensure_schema_integrity(); version_not_available -> schema_ok_or_move() end; {[], true, _} -> @@ -544,17 +543,15 @@ create_local_table_copy(Tab, Type) -> end, ok. -wait_for_replicated_tables() -> - wait_for_tables(replicated_table_names()). +wait_for_replicated_tables() -> wait_for_tables(replicated_table_names()). -wait_for_tables() -> - wait_for_tables(table_names()). +wait_for_tables() -> wait_for_tables(table_names()). 
wait_for_tables(TableNames) -> - Nonexistent = TableNames -- mnesia:system_info(tables), - case mnesia:wait_for_tables(TableNames -- Nonexistent, 30000) of - ok -> ok; - {timeout, BadTabs} -> + case mnesia:wait_for_tables(TableNames, 30000) of + ok -> + ok; + {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); {error, Reason} -> throw({error, {failed_waiting_for_tables, Reason}}) -- cgit v1.2.1 From d2199eccd9ecbf0c50666fe793d780cdbbf23ef3 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 23 Feb 2011 15:12:38 +0000 Subject: cosmetic --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f2d23dad..d3cb492e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -551,7 +551,7 @@ wait_for_tables(TableNames) -> case mnesia:wait_for_tables(TableNames, 30000) of ok -> ok; - {timeout, BadTabs} -> + {timeout, BadTabs} -> throw({error, {timeout_waiting_for_tables, BadTabs}}); {error, Reason} -> throw({error, {failed_waiting_for_tables, Reason}}) -- cgit v1.2.1 From 4d36462a0eb49acca8190c9aa6e5b54a59fc5d18 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 24 Feb 2011 14:25:06 +0000 Subject: English, not American --- src/rabbit_reader.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 29321c60..b172db56 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -158,7 +158,7 @@ server_properties(Protocol) -> {copyright, ?COPYRIGHT_MESSAGE}, {information, ?INFORMATION_MESSAGE}]]], - %% Filter duplicated properties in favor of config file provided values + %% Filter duplicated properties in favour of config file provided values lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end, NormalizedConfigServerProps). -- cgit v1.2.1 From a64a627af2739a5556f00064c9b02443bd0c4215 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 24 Feb 2011 15:14:26 +0000 Subject: Dialyzer typo --- src/rabbit_basic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index f29cc805..57aad808 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -44,7 +44,7 @@ -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), rabbit_types:decoded_content()) -> - rabbit_types:ok_or_error2(rabbit_types:message() | any())). + rabbit_types:ok_or_error2(rabbit_types:message(), any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: -- cgit v1.2.1 From 6fd77744201852a1fb961809f693d8b27acf7346 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 24 Feb 2011 18:15:34 +0000 Subject: Make memory alarms work correctly over clusters --- Makefile | 4 +- src/rabbit_alarm.erl | 122 +++++++++++++++++++++++++++++++++++--------- src/rabbit_node_monitor.erl | 11 ++-- src/vm_memory_monitor.erl | 4 +- 4 files changed, 107 insertions(+), 34 deletions(-) diff --git a/Makefile b/Makefile index 00c7809d..cdb86aad 100644 --- a/Makefile +++ b/Makefile @@ -177,11 +177,11 @@ stop-rabbit-on-node: all echo "rabbit:stop()." | $(ERL_CALL) set-memory-alarm: all - echo "alarm_handler:set_alarm({vm_memory_high_watermark, []})." | \ + echo "alarm_handler:set_alarm({{vm_memory_high_watermark, node()}, []})." | \ $(ERL_CALL) clear-memory-alarm: all - echo "alarm_handler:clear_alarm(vm_memory_high_watermark)." | \ + echo "alarm_handler:clear_alarm({vm_memory_high_watermark, node()})." 
| \ $(ERL_CALL) stop-node: diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 37e40981..365a5ed2 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -18,12 +18,14 @@ -behaviour(gen_event). --export([start/0, stop/0, register/2]). +-export([start/0, stop/0, register/2, on_node/2]). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). --record(alarms, {alertees, vm_memory_high_watermark = false}). +-export([remote_conserve_memory/2]). %% Internal use only + +-record(alarms, {alertees, high_watermarks}). %%---------------------------------------------------------------------------- @@ -33,6 +35,7 @@ -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). -spec(register/2 :: (pid(), mfa_tuple()) -> boolean()). +-spec(on_node/2 :: ('up'|'down', node()) -> 'ok'). -endif. @@ -56,32 +59,61 @@ register(Pid, HighMemMFA) -> {register, Pid, HighMemMFA}, infinity). +on_node(Action, Node) -> + gen_event:notify(alarm_handler, {node, Action, Node}). + +remote_conserve_memory(Pid, Conserve) -> + RemoteNode = node(Pid), + %% Can't use alarm_handler:{set,clear}_alarm because that doesn't + %% permit notifying a remote node. + case Conserve of + true -> gen_event:notify( + {alarm_handler, RemoteNode}, + {set_alarm, {{vm_memory_high_watermark, node()}, []}}); + false -> gen_event:notify( + {alarm_handler, RemoteNode}, + {clear_alarm, {vm_memory_high_watermark, node()}}) + end. + %%---------------------------------------------------------------------------- init([]) -> - {ok, #alarms{alertees = dict:new()}}. + {ok, #alarms{alertees = dict:new(), + high_watermarks = sets:new()}}. -handle_call({register, Pid, {M, F, A} = HighMemMFA}, - State = #alarms{alertees = Alertess}) -> - _MRef = erlang:monitor(process, Pid), - ok = case State#alarms.vm_memory_high_watermark of - true -> apply(M, F, A ++ [Pid, true]); - false -> ok - end, - NewAlertees = dict:store(Pid, HighMemMFA, Alertess), - {ok, State#alarms.vm_memory_high_watermark, - State#alarms{alertees = NewAlertees}}; +handle_call({register, Pid, HighMemMFA}, State) -> + {ok, 0 < sets:size(State#alarms.high_watermarks), + internal_register(Pid, HighMemMFA, State)}; handle_call(_Request, State) -> {ok, not_understood, State}. -handle_event({set_alarm, {vm_memory_high_watermark, []}}, State) -> - ok = alert(true, State#alarms.alertees), - {ok, State#alarms{vm_memory_high_watermark = true}}; - -handle_event({clear_alarm, vm_memory_high_watermark}, State) -> - ok = alert(false, State#alarms.alertees), - {ok, State#alarms{vm_memory_high_watermark = false}}; +handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, + State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:add_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, true), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, + State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:del_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({node, up, Node}, State) -> + %% Must do this via notify and not call to avoid possible deadlock. 
+ ok = gen_event:notify( + {alarm_handler, Node}, + {register, self(), {?MODULE, remote_conserve_memory, []}}), + {ok, State}; + +handle_event({node, down, Node}, State = #alarms{high_watermarks = Highs}) -> + Highs1 = sets:del_element(Node, Highs), + ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), + {ok, State#alarms{high_watermarks = Highs1}}; + +handle_event({register, Pid, HighMemMFA}, State) -> + {ok, internal_register(Pid, HighMemMFA, State)}; handle_event(_Event, State) -> {ok, State}. @@ -100,10 +132,50 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. %%---------------------------------------------------------------------------- -alert(_Alert, undefined) -> - ok; -alert(Alert, Alertees) -> + +maybe_alert(Before, After, Alertees, AlarmNode, Action) + when AlarmNode =:= node() -> + %% If we have changed our alarm state, always inform the remotes. + case {sets:is_element(AlarmNode, Before), sets:is_element(AlarmNode, After), + Action} of + {false, true, true} -> alert_remote(Action, Alertees); + {true, false, false} -> alert_remote(Action, Alertees); + _ -> ok + end, + maybe_alert_local(Before, After, Alertees, Action); +maybe_alert(Before, After, Alertees, _AlarmNode, Action) -> + maybe_alert_local(Before, After, Alertees, Action). + +maybe_alert_local(Before, After, Alertees, Action) -> + %% If the overall alarm state has changed, inform the locals. + case {sets:size(Before), sets:size(After), Action} of + {0, 1, true} -> alert_local(Action, Alertees); + {1, 0, false} -> alert_local(Action, Alertees); + _ -> ok + end. + +alert_local(Alert, Alertees) -> + alert(Alert, Alertees, fun erlang:'=:='/2). + +alert_remote(Alert, Alertees) -> + alert(Alert, Alertees, fun erlang:'=/='/2). + +alert(Alert, Alertees, NodeComparator) -> + Node = node(), dict:fold(fun (Pid, {M, F, A}, Acc) -> - ok = erlang:apply(M, F, A ++ [Pid, Alert]), - Acc + case NodeComparator(Node, node(Pid)) of + true -> ok = erlang:apply(M, F, A ++ [Pid, Alert]), + Acc; + false -> Acc + end end, ok, Alertees). + +internal_register(Pid, {M, F, A} = HighMemMFA, + State = #alarms{alertees = Alertees}) -> + _MRef = erlang:monitor(process, Pid), + ok = case sets:is_element(node(), State#alarms.high_watermarks) of + true -> apply(M, F, A ++ [Pid, true]); + false -> ok + end, + NewAlertees = dict:store(Pid, HighMemMFA, Alertees), + State#alarms{alertees = NewAlertees}. diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 817abaa2..061f628d 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -69,6 +69,7 @@ handle_call(_Request, _From, State) -> handle_cast({rabbit_running_on, Node}, State) -> rabbit_log:info("node ~p up~n", [Node]), erlang:monitor(process, {rabbit, Node}), + ok = rabbit_alarm:on_node(up, Node), {noreply, State}; handle_cast(_Msg, State) -> {noreply, State}. @@ -92,10 +93,10 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- -%% TODO: This may turn out to be a performance hog when there are -%% lots of nodes. We really only need to execute this code on -%% *one* node, rather than all of them. +%% TODO: This may turn out to be a performance hog when there are lots +%% of nodes. We really only need to execute some of these statements +%% on *one* node, rather than all of them. handle_dead_rabbit(Node) -> ok = rabbit_networking:on_node_down(Node), - ok = rabbit_amqqueue:on_node_down(Node). - + ok = rabbit_amqqueue:on_node_down(Node), + ok = rabbit_alarm:on_node(down, Node). 
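%% (Illustrative aside: the handlers above key alarms by node and only notify
%% local alertees when the set of alarmed nodes goes from empty to non-empty
%% or back, as in maybe_alert_local/4. A pure-function sketch of that
%% transition rule, with invented node names and result atoms:)
-module(alarm_set_sketch).
-export([set_alarm/2, clear_alarm/2, demo/0]).

set_alarm(Node, Alarmed)   -> transition(Alarmed, sets:add_element(Node, Alarmed)).
clear_alarm(Node, Alarmed) -> transition(Alarmed, sets:del_element(Node, Alarmed)).

%% start_conserving / stop_conserving correspond to alerting the local
%% processes with true / false; any other transition changes nothing.
transition(Before, After) ->
    case {sets:size(Before), sets:size(After)} of
        {0, 1} -> {start_conserving, After};
        {1, 0} -> {stop_conserving, After};
        _      -> {no_change, After}
    end.

demo() ->
    S0 = sets:new(),
    {start_conserving, S1} = set_alarm(node_a, S0),
    {no_change,        S2} = set_alarm(node_b, S1),
    {no_change,        S3} = clear_alarm(node_a, S2),
    {stop_conserving, _S4} = clear_alarm(node_b, S3),
    ok.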
diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl index 44e1e4b5..dcc6aff5 100644 --- a/src/vm_memory_monitor.erl +++ b/src/vm_memory_monitor.erl @@ -175,10 +175,10 @@ internal_update(State = #state { memory_limit = MemLimit, case {Alarmed, NewAlarmed} of {false, true} -> emit_update_info(set, MemUsed, MemLimit), - alarm_handler:set_alarm({vm_memory_high_watermark, []}); + alarm_handler:set_alarm({{vm_memory_high_watermark, node()}, []}); {true, false} -> emit_update_info(clear, MemUsed, MemLimit), - alarm_handler:clear_alarm(vm_memory_high_watermark); + alarm_handler:clear_alarm({vm_memory_high_watermark, node()}); _ -> ok end, -- cgit v1.2.1 From a6d046e3cbbde4320b201fd7d78a864749fe70a1 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 24 Feb 2011 18:32:56 +0000 Subject: Create log backups in the correct folder on Windows --- scripts/rabbitmq-server.bat | 15 ++++++--------- scripts/rabbitmq-service.bat | 15 ++++++--------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat index 2ca9f2b3..5e2097db 100644 --- a/scripts/rabbitmq-server.bat +++ b/scripts/rabbitmq-server.bat @@ -72,17 +72,14 @@ rem Log management (rotation, filtering based of size...) is left as an exercice set BACKUP_EXTENSION=.1 -set LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log -set SASL_LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log - -set LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log!BACKUP_EXTENSION! -set SASL_LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log!BACKUP_EXTENSION! +set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log +set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log if exist "!LOGS!" ( - type "!LOGS!" >> "!LOGS_BACKUP!" + type "!LOGS!" >> "!LOGS!!BACKUP_EXTENSION!" ) if exist "!SASL_LOGS!" ( - type "!SASL_LOGS!" >> "!SASL_LOGS_BACKUP!" + type "!SASL_LOGS!" >> "!SASL_LOGS!!BACKUP_EXTENSION!" ) rem End of log management @@ -144,10 +141,10 @@ if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" ( +P 1048576 ^ -kernel inet_default_connect_options "[{nodelay, true}]" ^ !RABBITMQ_LISTEN_ARG! ^ --kernel error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!.log"\"} ^ +-kernel error_logger {file,\""!LOGS:\=/!"\"} ^ !RABBITMQ_SERVER_ERL_ARGS! ^ -sasl errlog_type error ^ --sasl sasl_error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!-sasl.log"\"} ^ +-sasl sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ -os_mon start_cpu_sup true ^ -os_mon start_disksup false ^ -os_mon start_memsup false ^ diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index bc452fea..aa428a8c 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -105,17 +105,14 @@ rem Log management (rotation, filtering based on size...) is left as an exercise set BACKUP_EXTENSION=.1 -set LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log -set SASL_LOGS=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log - -set LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!.log!BACKUP_EXTENSION! -set SASL_LOGS_BACKUP=!RABBITMQ_BASE!\log\!RABBITMQ_NODENAME!-sasl.log!BACKUP_EXTENSION! +set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log +set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log if exist "!LOGS!" ( - type "!LOGS!" >> "!LOGS_BACKUP!" + type "!LOGS!" >> "!LOGS!!BACKUP_EXTENSION!" ) if exist "!SASL_LOGS!" ( - type "!SASL_LOGS!" >> "!SASL_LOGS_BACKUP!" + type "!SASL_LOGS!" >> "!SASL_LOGS!!BACKUP_EXTENSION!" 
) rem End of log management @@ -209,10 +206,10 @@ set ERLANG_SERVICE_ARGUMENTS= ^ +A30 ^ -kernel inet_default_connect_options "[{nodelay,true}]" ^ !RABBITMQ_LISTEN_ARG! ^ --kernel error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!.log"\"} ^ +-kernel error_logger {file,\""!LOGS:\=/!"\"} ^ !RABBITMQ_SERVER_ERL_ARGS! ^ -sasl errlog_type error ^ --sasl sasl_error_logger {file,\""!RABBITMQ_LOG_BASE!/!RABBITMQ_NODENAME!-sasl.log"\"} ^ +-sasl sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^ -os_mon start_cpu_sup true ^ -os_mon start_disksup false ^ -os_mon start_memsup false ^ -- cgit v1.2.1 From d7c926b9377343878f7bc263b8d44f6a1ae1cc8d Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 25 Feb 2011 12:17:17 +0000 Subject: No, we don't supply multi man page any more. --- packaging/macports/Portfile.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 67ebcf78..8c22a75e 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -104,7 +104,8 @@ post-destroot { file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmq-server file copy ${wrappersbin}/rabbitmq-multi ${wrappersbin}/rabbitmqctl - xinstall -m 644 -W ${mansrc}/man1 rabbitmq-multi.1.gz rabbitmq-server.1.gz rabbitmqctl.1.gz ${mandest}/man1/ + xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz \ + ${mandest}/man1/ xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ } -- cgit v1.2.1 From 6c854337b76061d06fb1dd5e9db4976fc5b9e6f4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 12:25:16 +0000 Subject: Make documentation accurate for current API... --- src/gm.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index b3fb7eca..b21217f6 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -55,14 +55,14 @@ %% start_link/3 %% Provide the group name, the callback module name, and any arguments %% you wish to be passed into the callback module's functions. The -%% joined/1 will be called when we have joined the group, and the list -%% of arguments will have appended to it a list of the current members -%% of the group. See the comments in behaviour_info/1 below for -%% further details of the callback functions. +%% joined/2 will be called when we have joined the group, with the +%% arguments passed to start_link and a list of the current members of +%% the group. See the comments in behaviour_info/1 below for further +%% details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback -%% terminate/1 function will be called. +%% terminate/2 function will be called. %% %% broadcast/2 %% Provide the Pid and a Message. The message will be sent to all @@ -455,16 +455,16 @@ behaviour_info(callbacks) -> %% quickly, it's possible that we will never see that member %% appear in either births or deaths. However we are guaranteed %% that (1) we will see a member joining either in the births - %% here, or in the members passed to joined/1 before receiving + %% here, or in the members passed to joined/2 before receiving %% any messages from it; and (2) we will not see members die that %% we have not seen born (or supplied in the members to - %% joined/1). + %% joined/2). {members_changed, 3}, %% Supplied with Args provided in start_link, the sender, and the %% message. 
This does get called for messages injected by this %% member, however, in such cases, there is no special - %% significance of this call: it does not indicate that the + %% significance of this invocation: it does not indicate that the %% message has made it to any other members, let alone all other %% members. {handle_msg, 3}, -- cgit v1.2.1 From 522e08893e39b4f843f319d504812f8d60249769 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Fri, 25 Feb 2011 12:36:45 +0000 Subject: We renamed .conf to -env.conf. --- packaging/macports/Portfile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 7583d668..809f518b 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -104,7 +104,7 @@ post-destroot { xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz \ ${mandest}/man1/ - xinstall -m 644 -W ${mansrc}/man5 rabbitmq.conf.5.gz ${mandest}/man5/ + xinstall -m 644 -W ${mansrc}/man5 rabbitmq-env.conf.5.gz ${mandest}/man5/ } pre-install { -- cgit v1.2.1 From f4c23c93527e9bd37243ee883b552b478427c7c2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 13:05:21 +0000 Subject: Additional word --- src/gm.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index b21217f6..70633a08 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -55,10 +55,10 @@ %% start_link/3 %% Provide the group name, the callback module name, and any arguments %% you wish to be passed into the callback module's functions. The -%% joined/2 will be called when we have joined the group, with the -%% arguments passed to start_link and a list of the current members of -%% the group. See the comments in behaviour_info/1 below for further -%% details of the callback functions. +%% joined/2 function will be called when we have joined the group, +%% with the arguments passed to start_link and a list of the current +%% members of the group. See the comments in behaviour_info/1 below +%% for further details of the callback functions. %% %% leave/1 %% Provide the Pid. Removes the Pid from the group. The callback -- cgit v1.2.1 From 1633fd03f06b5b43006ef83833d5a0c9f28c510f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 14:45:45 +0000 Subject: multiple_routing_keys/0 is not part of the backing_queue --- include/rabbit_backing_queue_spec.hrl | 1 - src/rabbit_variable_queue.erl | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 17cdedc2..accb2c0e 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -65,4 +65,3 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). --spec(multiple_routing_keys/0 :: () -> 'ok'). diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 3ef76d15..13fe9fda 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -353,6 +353,8 @@ -include("rabbit_backing_queue_spec.hrl"). +-spec(multiple_routing_keys/0 :: () -> 'ok'). + -endif. 
-define(BLANK_DELTA, #delta { start_seq_id = undefined, -- cgit v1.2.1 From c62cfd0cea0a4691d3b7806d0353eaeca8d7a375 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Feb 2011 14:46:30 +0000 Subject: remove blank trailing line --- src/rabbit_msg_store.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index ef0e2e0d..907f567b 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2018,4 +2018,3 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), ok. - -- cgit v1.2.1 From fcb9a05d24be5a256de6539b0208371cf17aae8f Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 25 Feb 2011 16:12:21 +0000 Subject: Stricter msg store upgrade --- src/rabbit_msg_store.erl | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 907f567b..9e65e442 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1970,8 +1970,7 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), file:delete(filename:join(Dir, ?CLEAN_FILENAME)), - [file:delete(filename:join(Dir, File)) || - File <- list_sorted_file_names(Dir, ?FILE_EXTENSION_TMP)], + recover_crashed_compactions(BaseDir), ok. foreach_file(D, Fun, Files) -> @@ -1986,12 +1985,11 @@ transform_dir(BaseDir, Store, TransformFun) -> TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); - false -> OldFileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), - foreach_file(Dir, TmpDir, TransformFile, OldFileList), - foreach_file(Dir, fun file:delete/1, OldFileList), - NewFileList = list_sorted_file_names(TmpDir, ?FILE_EXTENSION), - foreach_file(TmpDir, Dir, fun file:copy/2, NewFileList), - foreach_file(TmpDir, fun file:delete/1, NewFileList), + false -> FileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), + foreach_file(Dir, TmpDir, TransformFile, FileList), + foreach_file(Dir, fun file:delete/1, FileList), + foreach_file(TmpDir, Dir, fun file:copy/2, FileList), + foreach_file(TmpDir, fun file:delete/1, FileList), ok = file:del_dir(TmpDir) end. @@ -2005,15 +2003,9 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), fun({Guid, _Size, _Offset, BinMsg}, ok) -> - case TransformFun(binary_to_term(BinMsg)) of - {ok, MsgNew} -> - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), - ok; - {error, Reason} -> - error_logger:error_msg("Message transform failed: ~p~n", - [Reason]), - ok - end + {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), -- cgit v1.2.1 From bbc9fcbcb631404e46259a606649a6bb5648db57 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Feb 2011 11:02:29 +0000 Subject: ...and untabify. 
--- src/rabbit_channel.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index d8a332f3..7dc07e5a 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1288,12 +1288,12 @@ is_message_persistent(Content) -> process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_route), maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], - return_unroutable, State), + return_unroutable, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_consumers), maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], - return_not_delivered, State), + return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); -- cgit v1.2.1 From 926d3b66b33dd75f87a8bc903a60e00ecbbea96f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 28 Feb 2011 12:55:44 +0000 Subject: Removing gm_test from bug23554 which has in bug23727 become gm_soak_test --- src/gm_test.erl | 126 -------------------------------------------------------- 1 file changed, 126 deletions(-) delete mode 100644 src/gm_test.erl diff --git a/src/gm_test.erl b/src/gm_test.erl deleted file mode 100644 index e0a92a0c..00000000 --- a/src/gm_test.erl +++ /dev/null @@ -1,126 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. -%% - --module(gm_test). - --export([test/0]). --export([joined/2, members_changed/3, handle_msg/3, terminate/2]). - --behaviour(gm). - --include("gm_specs.hrl"). - -get_state() -> - get(state). - -with_state(Fun) -> - put(state, Fun(get_state())). - -inc() -> - case 1 + get(count) of - 100000 -> Now = os:timestamp(), - Start = put(ts, Now), - Diff = timer:now_diff(Now, Start), - Rate = 100000 / (Diff / 1000000), - io:format("~p seeing ~p msgs/sec~n", [self(), Rate]), - put(count, 0); - N -> put(count, N) - end. - -joined([], Members) -> - io:format("Joined ~p (~p members)~n", [self(), length(Members)]), - put(state, dict:from_list([{Member, empty} || Member <- Members])), - put(count, 0), - put(ts, os:timestamp()), - ok. - -members_changed([], Births, Deaths) -> - with_state( - fun (State) -> - State1 = - lists:foldl( - fun (Born, StateN) -> - false = dict:is_key(Born, StateN), - dict:store(Born, empty, StateN) - end, State, Births), - lists:foldl( - fun (Died, StateN) -> - true = dict:is_key(Died, StateN), - dict:store(Died, died, StateN) - end, State1, Deaths) - end), - ok. 
- -handle_msg([], From, {test_msg, Num}) -> - inc(), - with_state( - fun (State) -> - ok = case dict:find(From, State) of - {ok, died} -> - exit({{from, From}, - {received_posthumous_delivery, Num}}); - {ok, empty} -> ok; - {ok, Num} -> ok; - {ok, Num1} when Num < Num1 -> - exit({{from, From}, - {duplicate_delivery_of, Num1}, - {expecting, Num}}); - {ok, Num1} -> - exit({{from, From}, - {missing_delivery_of, Num}, - {received_early, Num1}}); - error -> - exit({{from, From}, - {received_premature_delivery, Num}}) - end, - dict:store(From, Num + 1, State) - end), - ok. - -terminate([], Reason) -> - io:format("Left ~p (~p)~n", [self(), Reason]), - ok. - -spawn_member() -> - spawn_link( - fun () -> - random:seed(now()), - %% start up delay of no more than 10 seconds - timer:sleep(random:uniform(10000)), - {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), - Start = random:uniform(10000), - send_loop(Pid, Start, Start + random:uniform(10000)), - gm:leave(Pid), - spawn_more() - end). - -spawn_more() -> - [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))]. - -send_loop(_Pid, Target, Target) -> - ok; -send_loop(Pid, Count, Target) when Target > Count -> - case random:uniform(3) of - 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count}); - _ -> gm:broadcast(Pid, {test_msg, Count}) - end, - timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms - send_loop(Pid, Count + 1, Target). - -test() -> - ok = gm:create_tables(), - spawn_member(), - spawn_member(). -- cgit v1.2.1 From edb6f73c32720660b1705642bc5192226a9cca30 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 28 Feb 2011 17:11:58 +0000 Subject: Large amounts of debitrotting due to changes to confirms api and such like. Sadly mirrored confirms aren't working again yet... not really sure why --- src/rabbit_mirror_queue_master.erl | 4 +- src/rabbit_mirror_queue_slave.erl | 115 +++++++++++++++++++++++++++++-------- 2 files changed, 92 insertions(+), 27 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 11831a29..e2f9b020 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -232,8 +232,8 @@ ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> needs_idle_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> BQ:needs_idle_timeout(BQS). -idle_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> - BQ:idle_timeout(BQS). +idle_timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> + State #state { backing_queue_state = BQ:idle_timeout(BQS) }. handle_pre_hibernate(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 4f9d2066..396e3c35 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -67,7 +67,8 @@ -export([start_link/1, set_maximum_since_use/2]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, handle_pre_hibernate/1]). + code_change/3, handle_pre_hibernate/1, prioritise_call/3, + prioritise_cast/2]). -export([joined/2, members_changed/3, handle_msg/3]). @@ -82,6 +83,7 @@ master_node, backing_queue, backing_queue_state, + sync_timer_ref, rate_timer_ref, sender_queues, %% :: Pid -> MsgQ @@ -91,6 +93,7 @@ guid_to_channel %% for confirms }). +-define(SYNC_INTERVAL, 25). %% milliseconds -define(RAM_DURATION_UPDATE_INTERVAL, 5000). 
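%% -- Editor's note (not part of the original patch): the two intervals
%% above drive different timers in this slave. ?SYNC_INTERVAL paces the
%% sync_timeout cast, which flushes pending backing-queue work through
%% idle_timeout/1 whenever the backing queue reports needs_idle_timeout
%% (e.g. msg_store syncs sitting behind confirms), while
%% ?RAM_DURATION_UPDATE_INTERVAL paces the update_ram_duration reports
%% sent to rabbit_memory_monitor.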
start_link(Q) -> @@ -137,6 +140,7 @@ init([#amqqueue { name = QueueName } = Q]) -> backing_queue = BQ, backing_queue_state = BQS, rate_timer_ref = undefined, + sync_timer_ref = undefined, sender_queues = dict:new(), guid_ack = dict:new(), @@ -212,7 +216,14 @@ handle_cast(update_ram_duration, rabbit_memory_monitor:report_ram_duration(self(), RamDuration), BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), noreply(State #state { rate_timer_ref = just_measured, - backing_queue_state = BQS2 }). + backing_queue_state = BQS2 }); + +handle_cast(sync_timeout, State) -> + noreply(backing_queue_idle_timeout( + State #state { sync_timer_ref = undefined })). + +handle_info(timeout, State) -> + noreply(backing_queue_idle_timeout(State)); handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. @@ -245,12 +256,30 @@ code_change(_OldVsn, State, _Extra) -> handle_pre_hibernate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% mainly copied from amqqueue_process - BQS1 = BQ:handle_pre_hibernate(BQS), - %% no activity for a while == 0 egress and ingress rates + {RamDuration, BQS1} = BQ:ram_duration(BQS), DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), infinity), + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS2 })}. + BQS3 = BQ:handle_pre_hibernate(BQS2), + {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}. + +prioritise_call(Msg, _From, _State) -> + case Msg of + {maybe_run_queue_via_backing_queue, _Mod, _Fun} -> 6; + {gm_deaths, _Deaths} -> 5; + _ -> 0 + end. + +prioritise_cast(Msg, _State) -> + case Msg of + update_ram_duration -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + {maybe_run_queue_via_backing_queue, _Mod, _Fun} -> 6; + sync_timeout -> 6; + {gm, _Msg} -> 5; + _ -> 0 + end. %% --------------------------------------------------------------------------- %% GM @@ -285,12 +314,9 @@ handle_msg([SPid], _From, Msg) -> maybe_run_queue_via_backing_queue( Mod, Fun, State = #state { backing_queue = BQ, - backing_queue_state = BQS, - guid_to_channel = GTC }) -> + backing_queue_state = BQS }) -> {Guids, BQS1} = BQ:invoke(Mod, Fun, BQS), - GTC1 = lists:foldl(fun maybe_confirm_message/2, GTC, Guids), - State #state { backing_queue_state = BQS1, - guid_to_channel = GTC1 }. + confirm_messages(Guids, State #state { backing_queue_state = BQS1 }). record_confirm_or_confirm(#delivery { msg_seq_no = undefined }, _Q, GTC) -> GTC; @@ -305,13 +331,27 @@ record_confirm_or_confirm(#delivery { sender = ChPid, msg_seq_no = MsgSeqNo }, ok = rabbit_channel:confirm(ChPid, MsgSeqNo), GTC. -maybe_confirm_message(Guid, GTC) -> - case dict:find(Guid, GTC) of - {ok, {ChPid, MsgSeqNo}} when MsgSeqNo =/= undefined -> - ok = rabbit_channel:confirm(ChPid, MsgSeqNo), - dict:erase(Guid, GTC); - error -> - GTC +confirm_messages(Guids, State = #state { guid_to_channel = GTC }) -> + {CMs, GTC1} = + lists:foldl( + fun(Guid, {CMs, GTC0}) -> + case dict:find(Guid, GTC0) of + {ok, {ChPid, MsgSeqNo}} -> + {gb_trees_cons(ChPid, MsgSeqNo, CMs), + dict:erase(Guid, GTC0)}; + _ -> + {CMs, GTC0} + end + end, {gb_trees:empty(), GTC}, Guids), + gb_trees:map(fun(ChPid, MsgSeqNos) -> + rabbit_channel:confirm(ChPid, MsgSeqNos) + end, CMs), + State #state { guid_to_channel = GTC1 }. 
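%% -- Editor's sketch (not part of the original patch): a minimal,
%% hypothetical demo of the gb_trees_cons/3 helper defined just below.
%% It accumulates a per-key list, which is how confirm_messages/2 above
%% batches MsgSeqNos per channel pid so that rabbit_channel:confirm/2 is
%% called once per channel rather than once per message.
gb_trees_cons_demo() ->
    Pairs = [{ch1, 7}, {ch1, 9}, {ch2, 4}],   %% e.g. {ChPid, MsgSeqNo}
    Tree = lists:foldl(fun ({K, V}, Acc) -> gb_trees_cons(K, V, Acc) end,
                       gb_trees:empty(), Pairs),
    %% returns [{ch1, [9, 7]}, {ch2, [4]}] -- values accumulate in
    %% reverse order of insertion, which is fine for confirms
    gb_trees:to_list(Tree).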
+ +gb_trees_cons(Key, Value, Tree) -> + case gb_trees:lookup(Key, Tree) of + {value, Values} -> gb_trees:update(Key, [Value | Values], Tree); + none -> gb_trees:insert(Key, [Value], Tree) end. handle_process_result({ok, State}) -> noreply(State); @@ -348,15 +388,39 @@ promote_me(From, #state { q = Q, {become, rabbit_amqqueue_process, QueueState, hibernate}. noreply(State) -> - {noreply, next_state(State), hibernate}. + {NewState, Timeout} = next_state(State), + {noreply, NewState, Timeout}. reply(Reply, State) -> - {reply, Reply, next_state(State), hibernate}. + {NewState, Timeout} = next_state(State), + {reply, Reply, NewState, Timeout}. next_state(State) -> - ensure_rate_timer(State). + State1 = #state { backing_queue = BQ, backing_queue_state = BQS } = + ensure_rate_timer(State), + case BQ:needs_idle_timeout(BQS) of + true -> {ensure_sync_timer(State1), 0}; + false -> {stop_sync_timer(State1), hibernate} + end. %% copied+pasted from amqqueue_process +backing_queue_idle_timeout(State = #state { backing_queue = BQ }) -> + maybe_run_queue_via_backing_queue( + BQ, fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). + +ensure_sync_timer(State = #state { sync_timer_ref = undefined }) -> + {ok, TRef} = timer:apply_after( + ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + State #state { sync_timer_ref = TRef }; +ensure_sync_timer(State) -> + State. + +stop_sync_timer(State = #state { sync_timer_ref = undefined }) -> + State; +stop_sync_timer(State = #state { sync_timer_ref = TRef }) -> + {ok, cancel} = timer:cancel(TRef), + State #state { sync_timer_ref = undefined }. + ensure_rate_timer(State = #state { rate_timer_ref = undefined }) -> {ok, TRef} = timer:apply_after( ?RAM_DURATION_UPDATE_INTERVAL, @@ -438,10 +502,11 @@ process_instruction( {true, AckRequired} -> {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - {GA1, GTC3} = case AckRequired of - true -> {dict:store(Guid, AckTag, GA), GTC1}; - false -> {GA, maybe_confirm_message(Guid, GTC1)} - end, + {GA1, GTC3} = + case AckRequired of + true -> {dict:store(Guid, AckTag, GA), GTC1}; + false -> {GA, confirm_messages([Guid], GTC1)} + end, State1 #state { backing_queue_state = BQS1, guid_ack = GA1, guid_to_channel = GTC3 } -- cgit v1.2.1 From ab52e4b4f9e7632eccd4ea769b28a744272d595f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 28 Feb 2011 18:09:01 +0000 Subject: confirms still don't work... but it needs sleep to fix this. --- src/rabbit_mirror_queue_slave.erl | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 396e3c35..df9a28f4 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -328,7 +328,7 @@ record_confirm_or_confirm( dict:store(Guid, {ChPid, MsgSeqNo}, GTC); record_confirm_or_confirm(#delivery { sender = ChPid, msg_seq_no = MsgSeqNo }, _Q, GTC) -> - ok = rabbit_channel:confirm(ChPid, MsgSeqNo), + ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), GTC. 
confirm_messages(Guids, State = #state { guid_to_channel = GTC }) -> @@ -502,14 +502,13 @@ process_instruction( {true, AckRequired} -> {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - {GA1, GTC3} = - case AckRequired of - true -> {dict:store(Guid, AckTag, GA), GTC1}; - false -> {GA, confirm_messages([Guid], GTC1)} - end, + GA1 = case AckRequired of + true -> dict:store(Guid, AckTag, GA); + false -> GA + end, State1 #state { backing_queue_state = BQS1, guid_ack = GA1, - guid_to_channel = GTC3 } + guid_to_channel = GTC1 } end}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, -- cgit v1.2.1 From 3bef7dc2825c1274c7f4869c34c2d5af6640e20f Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 1 Mar 2011 15:44:37 +0000 Subject: First cut of pushing edge cleaning out of main topic bind delete tx --- src/rabbit_exchange_type_topic.erl | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index c1741b30..a23df31f 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -66,16 +66,21 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, _X, Bs) -> - lists:foreach(fun remove_binding/1, Bs), + ToDelete = + lists:foldr(fun(B = #binding{source = X, destination = D}, Acc) -> + [{FinalNode, _} | _] = binding_path(B), + [{X, FinalNode, D} | Acc] + end, [], Bs), + [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], ok; -remove_bindings(false, _X, _Bs) -> +remove_bindings(false, _X, Bs) -> + [rabbit_misc:execute_mnesia_transaction( + fun() -> remove_path_if_empty(X, binding_path(B)) end) + || B = #binding{source = X} <- Bs], ok. -remove_binding(#binding{source = X, key = K, destination = D}) -> - Path = [{FinalNode, _} | _] = follow_down_get_path(X, split_topic_key(K)), - trie_remove_binding(X, FinalNode, D), - remove_path_if_empty(X, Path), - ok. +binding_path(#binding{source = X, key = K}) -> + follow_down_get_path(X, split_topic_key(K)). assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). -- cgit v1.2.1 From 1ed39dee2676f0519cf061a08780202ee72f8aac Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 1 Mar 2011 16:10:55 +0000 Subject: Correct foldr -> foldl --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 0b43147d..25cdcc31 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -69,7 +69,7 @@ add_binding(false, _Exchange, _Binding) -> remove_bindings(true, _X, Bs) -> ToDelete = - lists:foldr(fun(B = #binding{source = X, destination = D}, Acc) -> + lists:foldl(fun(B = #binding{source = X, destination = D}, Acc) -> [{FinalNode, _} | _] = binding_path(B), [{X, FinalNode, D} | Acc] end, [], Bs), -- cgit v1.2.1 From 5e3b5b3b898fffcda5f897687a90dfe95669e989 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 2 Mar 2011 13:01:30 +0000 Subject: Fixed confirms in HA queues. Broke slave promotion. 
Will fix --- src/rabbit_mirror_queue_slave.erl | 168 +++++++++++++++++++++++--------------- 1 file changed, 104 insertions(+), 64 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index df9a28f4..5c101ee2 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -88,9 +88,8 @@ sender_queues, %% :: Pid -> MsgQ guid_ack, %% :: Guid -> AckTag - seen, %% Set Guid - guid_to_channel %% for confirms + guid_status }). -define(SYNC_INTERVAL, 25). %% milliseconds @@ -144,9 +143,7 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), guid_ack = dict:new(), - seen = sets:new(), - - guid_to_channel = dict:new() + guid_status = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}; @@ -318,35 +315,41 @@ maybe_run_queue_via_backing_queue( {Guids, BQS1} = BQ:invoke(Mod, Fun, BQS), confirm_messages(Guids, State #state { backing_queue_state = BQS1 }). -record_confirm_or_confirm(#delivery { msg_seq_no = undefined }, _Q, GTC) -> - GTC; -record_confirm_or_confirm( - #delivery { sender = ChPid, - message = #basic_message { is_persistent = true, - guid = Guid }, - msg_seq_no = MsgSeqNo }, #amqqueue { durable = true }, GTC) -> - dict:store(Guid, {ChPid, MsgSeqNo}, GTC); -record_confirm_or_confirm(#delivery { sender = ChPid, msg_seq_no = MsgSeqNo }, - _Q, GTC) -> - ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - GTC. - -confirm_messages(Guids, State = #state { guid_to_channel = GTC }) -> - {CMs, GTC1} = + +needs_confirming(#delivery{ msg_seq_no = undefined }, _State) -> + never; +needs_confirming(#delivery { message = #basic_message { + is_persistent = true } }, + #state { q = #amqqueue { durable = true } }) -> + eventually; +needs_confirming(_Delivery, _State) -> + immediately. + +confirm_messages(Guids, State = #state { guid_status = GS }) -> + {GS1, CMs} = lists:foldl( - fun(Guid, {CMs, GTC0}) -> - case dict:find(Guid, GTC0) of - {ok, {ChPid, MsgSeqNo}} -> - {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, GTC0)}; - _ -> - {CMs, GTC0} + fun (Guid, {GSN, CMsN} = Acc) -> + %% We will never see {confirmed, ChPid} here. + case dict:find(Guid, GSN) of + error -> + %% If it needed confirming, it'll have + %% already been done. + Acc; + {ok, {published, ChPid}} -> + %% Still not seen it from the channel, just + %% record that it's been confirmed. + {dict:store(Guid, {confirmed, ChPid}, GSN), CMsN}; + {ok, {published, ChPid, MsgSeqNo}} -> + %% Seen from both GM and Channel. Can now + %% confirm. + {dict:erase(Guid, GSN), + gb_trees_cons(ChPid, MsgSeqNo, CMsN)} end - end, {gb_trees:empty(), GTC}, Guids), - gb_trees:map(fun(ChPid, MsgSeqNos) -> - rabbit_channel:confirm(ChPid, MsgSeqNos) + end, {GS, gb_trees:empty()}, Guids), + gb_trees:map(fun (ChPid, MsgSeqNos) -> + ok = rabbit_channel:confirm(ChPid, MsgSeqNos) end, CMs), - State #state { guid_to_channel = GTC1 }. + State #state { guid_status = GS1 }. 
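%% -- Editor's summary (not part of the original patch) of the
%% needs_confirming/2 classification introduced above:
%%   * delivery carries no msg_seq_no           -> never       (publisher
%%     did not ask for a confirm)
%%   * persistent message into a durable queue  -> eventually  (confirm
%%     only once the backing queue reports the guid as confirmed)
%%   * anything else                            -> immediately (confirm as
%%     soon as the channel's msg_seq_no is known)
%% confirm_messages/2 above then issues one rabbit_channel:confirm/2 per
%% channel, but only after both the GM publish and the channel's
%% msg_seq_no for a guid have been seen.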
gb_trees_cons(Key, Value, Tree) -> case gb_trees:lookup(Key, Tree) of @@ -363,7 +366,6 @@ promote_me(From, #state { q = Q, backing_queue_state = BQS, rate_timer_ref = RateTRef, sender_queues = SQ, - seen = Seen, guid_ack = GA }) -> rabbit_log:info("Promoting slave ~p for ~s~n", [self(), rabbit_misc:rs(Q #amqqueue.name)]), @@ -371,8 +373,9 @@ promote_me(From, #state { q = Q, true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), + %% TODO fix up seen MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, Seen), + CPid, BQ, BQS, GM, sets:new()), %% We have to do the requeue via this init because otherwise we %% don't have access to the relevent MsgPropsFun. Also, we are %% already in mnesia as the master queue pid. Thus we cannot just @@ -441,59 +444,97 @@ stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> State #state { rate_timer_ref = undefined }. maybe_enqueue_message( - Delivery = #delivery { message = #basic_message { guid = Guid }, - sender = ChPid }, - State = #state { q = Q, - sender_queues = SQ, - seen = Seen, - guid_to_channel = GTC }) -> - case sets:is_element(Guid, Seen) of - true -> - GTC1 = record_confirm_or_confirm(Delivery, Q, GTC), - State #state { guid_to_channel = GTC1, - seen = sets:del_element(Guid, Seen) }; - false -> + Delivery = #delivery { message = #basic_message { guid = Guid }, + msg_seq_no = MsgSeqNo, + sender = ChPid }, + State = #state { sender_queues = SQ, + guid_status = GS }) -> + %% We will never see {published, ChPid, MsgSeqNo} here. + case dict:find(Guid, GS) of + error -> MQ = case dict:find(ChPid, SQ) of {ok, MQ1} -> MQ1; error -> queue:new() end, SQ1 = dict:store(ChPid, queue:in(Delivery, MQ), SQ), - State #state { sender_queues = SQ1 } + State #state { sender_queues = SQ1 }; + {ok, {confirmed, ChPid}} -> + %% BQ has confirmed it but we didn't know what the + %% msg_seq_no was at the time. We do now! + ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), + State #state { guid_status = dict:erase(Guid, GS) }; + {ok, {published, ChPid}} -> + %% It was published to the BQ and we didn't know the + %% msg_seq_no so couldn't confirm it at the time. + case needs_confirming(Delivery, State) of + never -> + State #state { guid_status = dict:erase(Guid, GS) }; + eventually -> + State #state { + guid_status = dict:store( + Guid, {published, ChPid, MsgSeqNo}, GS) }; + immediately -> + ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), + State #state { guid_status = dict:erase(Guid, GS) } + end end. process_instruction( {publish, Deliver, ChPid, MsgProps, Msg = #basic_message { guid = Guid }}, - State = #state { q = Q, - sender_queues = SQ, + State = #state { sender_queues = SQ, backing_queue = BQ, backing_queue_state = BQS, guid_ack = GA, - seen = Seen, - guid_to_channel = GTC }) -> - {SQ1, Seen1, GTC1} = + guid_status = GS }) -> + + %% We really are going to do the publish right now, even though we + %% may not have seen it directly from the channel. As a result, we + %% may know that it needs confirming without knowing its + %% msg_seq_no, which means that we can see the confirmation come + %% back from the backing queue without knowing the msg_seq_no, + %% which means that we're going to have to hang on to the fact + %% that we've seen the guid confirmed until we can associate it + %% with a msg_seq_no. 
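%% -- Editor's gloss (not part of the original patch): a guid_status
%% entry therefore moves through these states:
%%   (absent) --GM publish arrives first--> {published, ChPid}
%%   {published, ChPid} --channel delivery, confirm needed eventually-->
%%       {published, ChPid, MsgSeqNo}
%%   {published, ChPid} --backing queue confirms first--> {confirmed, ChPid}
%%   {confirmed, ChPid} --channel delivery--> confirm sent, entry erased
%%   {published, ChPid, MsgSeqNo} --backing queue confirms--> confirm sent,
%%       entry erased
%% (when the channel's publish arrives before the GM instruction it simply
%% waits in sender_queues until the matching instruction shows up).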
+ GS1 = dict:store(Guid, {published, ChPid}, GS), + {SQ1, GS2} = case dict:find(ChPid, SQ) of error -> - {SQ, sets:add_element(Guid, Seen), GTC}; + {SQ, GS1}; {ok, MQ} -> case queue:out(MQ) of {empty, _MQ} -> - {SQ, sets:add_element(Guid, Seen), GTC}; + {SQ, GS1}; {{value, Delivery = #delivery { - message = #basic_message { guid = Guid } }}, + msg_seq_no = MsgSeqNo, + message = #basic_message { guid = Guid } }}, MQ1} -> - GTC2 = record_confirm_or_confirm(Delivery, Q, GTC), - {dict:store(ChPid, MQ1, SQ), Seen, GTC2}; + %% We received the msg from the channel + %% first. Thus we need to deal with confirms + %% here. + {dict:store(ChPid, MQ1, SQ), + case needs_confirming(Delivery, State) of + never -> + GS; + eventually -> + dict:store( + Guid, {published, ChPid, MsgSeqNo}, GS); + immediately -> + ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), + GS + end}; {{value, #delivery {}}, _MQ1} -> %% The instruction was sent to us before we %% were within the mirror_pids within the - %% amqqueue record. We'll never receive the - %% message directly. - {SQ, Seen, GTC} + %% #amqqueue{} record. We'll never receive the + %% message directly from the channel. And the + %% channel will not be expecting any confirms + %% from us. + {SQ, GS} end end, - State1 = State #state { sender_queues = SQ1, - seen = Seen1, - guid_to_channel = GTC1 }, + + State1 = State #state { sender_queues = SQ1, + guid_status = GS2 }, {ok, case Deliver of false -> @@ -507,8 +548,7 @@ process_instruction( false -> GA end, State1 #state { backing_queue_state = BQS1, - guid_ack = GA1, - guid_to_channel = GTC1 } + guid_ack = GA1 } end}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, -- cgit v1.2.1 From 75b306010463265a291e84d91f9e13ebbd470714 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 2 Mar 2011 13:32:59 +0000 Subject: only confirm delivered messages that need confirming --- src/rabbit_variable_queue.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d1307b85..d0c984cb 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,8 +510,13 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { guid = Guid }, - _MsgProps, State = #vqstate { len = 0 }) -> - blind_confirm(self(), gb_sets:singleton(Guid)), + MsgProps = #message_properties { + needs_confirming = NeedsConfirming }, + State = #vqstate { len = 0 }) -> + case NeedsConfirming of + true -> blind_confirm(self(), gb_sets:singleton(Guid)); + false -> ok + end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, -- cgit v1.2.1 From 00b3d0d64d64fbdad5c053fb8ab07932d8eb7341 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 2 Mar 2011 13:48:58 +0000 Subject: Remove erroneous documentation --- src/rabbit_mirror_queue_slave.erl | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 5c101ee2..93f885ba 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -29,40 +29,6 @@ %% %% All instructions from the GM group must be processed in the order %% in which they're received. -%% -%% Thus, we need a queue per sender, and a queue for GM instructions. -%% -%% On receipt of a GM group instruction, three things are possible: -%% 1. 
The queue of publisher messages is empty. Thus store the GM -%% instruction to the instrQ. -%% 2. The head of the queue of publisher messages has a message that -%% matches the GUID of the GM instruction. Remove the message, and -%% route appropriately. -%% 3. The head of the queue of publisher messages has a message that -%% does not match the GUID of the GM instruction. Throw away the GM -%% instruction: the GM instruction must correspond to a message -%% that we'll never receive. If it did not, then before the current -%% instruction, we would have received an instruction for the -%% message at the head of this queue, thus the head of the queue -%% would have been removed and processed. -%% -%% On receipt of a publisher message, three things are possible: -%% 1. The queue of GM group instructions is empty. Add the message to -%% the relevant queue and await instructions from the GM. -%% 2. The head of the queue of GM group instructions has an -%% instruction matching the GUID of the message. Remove that -%% instruction and act on it. Attempt to process the rest of the -%% instrQ. -%% 3. The head of the queue of GM group instructions has an -%% instruction that does not match the GUID of the message. If the -%% message is from the same publisher as is referred to by the -%% instruction then throw away the GM group instruction and repeat -%% - attempt to match against the next instruction if there is one: -%% The instruction thrown away was for a message we'll never -%% receive. -%% -%% In all cases, we are relying heavily on order preserving messaging -%% both from the GM group and from the publishers. -export([start_link/1, set_maximum_since_use/2]). -- cgit v1.2.1 From d1cc5c276f92b3d3a7aeea8754821fc191c24514 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 2 Mar 2011 15:44:41 +0000 Subject: Extracted ensure_ssl and ssl_transform_fun for use by STOMP --- src/rabbit_networking.erl | 62 +++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 36f61628..c0cb78f5 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -24,7 +24,8 @@ close_connection/2]). %%used by TCP-based transports, e.g. STOMP adapter --export([check_tcp_listener_address/2]). +-export([check_tcp_listener_address/2, + ensure_ssl/0, ssl_transform_fun/1]). -export([tcp_listener_started/3, tcp_listener_stopped/3, start_client/1, start_ssl_client/2]). @@ -88,19 +89,8 @@ boot_ssl() -> {ok, []} -> ok; {ok, SslListeners} -> - ok = rabbit_misc:start_applications([crypto, public_key, ssl]), - {ok, SslOptsConfig} = application:get_env(ssl_options), - % unknown_ca errors are silently ignored prior to R14B unless we - % supply this verify_fun - remove when at least R14B is required - SslOpts = - case proplists:get_value(verify, SslOptsConfig, verify_none) of - verify_none -> SslOptsConfig; - verify_peer -> [{verify_fun, fun([]) -> true; - ([_|_]) -> false - end} - | SslOptsConfig] - end, - [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners], + [start_ssl_listener(Listener, ensure_ssl()) + || Listener <- SslListeners], ok end. @@ -147,6 +137,34 @@ resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6; resolve_family(IP, auto) -> throw({error, {strange_family, IP}}); resolve_family(_, F) -> F. 
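%% -- Editor's sketch (not part of the original patch): how another
%% TCP-based transport, e.g. the STOMP adapter this commit mentions, might
%% reuse the two helpers exported below. start_stomp_ssl_client/1 and
%% stomp_reader:start_link/1 are hypothetical names used purely for
%% illustration.
start_stomp_ssl_client(Sock) ->
    SslOpts = rabbit_networking:ensure_ssl(),
    Upgrade = rabbit_networking:ssl_transform_fun(SslOpts),
    case Upgrade(Sock) of
        {ok, SslSock}   -> stomp_reader:start_link(SslSock);
        {error, Reason} -> {error, Reason}
    end.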
+ensure_ssl() -> + ok = rabbit_misc:start_applications([crypto, public_key, ssl]), + {ok, SslOptsConfig} = application:get_env(ssl_options), + + % unknown_ca errors are silently ignored prior to R14B unless we + % supply this verify_fun - remove when at least R14B is required + case proplists:get_value(verify, SslOptsConfig, verify_none) of + verify_none -> SslOptsConfig; + verify_peer -> [{verify_fun, fun([]) -> true; + ([_|_]) -> false + end} + | SslOptsConfig] + end. + +ssl_transform_fun(SslOpts) -> + fun (Sock) -> + case catch ssl:ssl_accept(Sock, SslOpts, ?SSL_TIMEOUT * 1000) of + {ok, SslSock} -> + rabbit_log:info("upgraded TCP connection ~p to SSL~n", + [self()]), + {ok, #ssl_socket{tcp = Sock, ssl = SslSock}}; + {error, Reason} -> + {error, {ssl_upgrade_error, Reason}}; + {'EXIT', Reason} -> + {error, {ssl_upgrade_failure, Reason}} + end + end. + check_tcp_listener_address(NamePrefix, Port) when is_integer(Port) -> check_tcp_listener_address_auto(NamePrefix, Port); @@ -246,21 +264,7 @@ start_client(Sock) -> start_client(Sock, fun (S) -> {ok, S} end). start_ssl_client(SslOpts, Sock) -> - start_client( - Sock, - fun (Sock1) -> - case catch ssl:ssl_accept(Sock1, SslOpts, ?SSL_TIMEOUT * 1000) of - {ok, SslSock} -> - rabbit_log:info("upgraded TCP connection ~p to SSL~n", - [self()]), - {ok, #ssl_socket{tcp = Sock1, ssl = SslSock}}; - {error, Reason} -> - {error, {ssl_upgrade_error, Reason}}; - {'EXIT', Reason} -> - {error, {ssl_upgrade_failure, Reason}} - - end - end). + start_client(Sock, ssl_transform_fun(SslOpts)). connections() -> [rabbit_connection_sup:reader(ConnSup) || -- cgit v1.2.1 From 974119b73e9f1ff36fcc304928ba39e232c2801c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 2 Mar 2011 16:12:49 +0000 Subject: It's very substantially wrong. --- src/rabbit_amqqueue_process.erl | 6 +-- src/rabbit_mirror_queue_master.erl | 94 +++++++++++++++++++++++++------------- src/rabbit_mirror_queue_slave.erl | 13 ++++-- 3 files changed, 75 insertions(+), 38 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6aed2f87..207f6bab 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -33,7 +33,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). --export([init_with_backing_queue_state/6]). +-export([init_with_backing_queue_state/7]). % Queue's state -record(q, {q, @@ -118,7 +118,7 @@ init(Q) -> {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, - RateTRef, AckTags, Deliveries) -> + RateTRef, AckTags, Deliveries, GTC) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), case Owner of none -> ok; @@ -140,7 +140,7 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, expiry_timer_ref = undefined, ttl = undefined, stats_timer = rabbit_event:init_stats_timer(), - guid_to_channel = dict:new()})), + guid_to_channel = GTC})), lists:foldl( fun (Delivery, StateN) -> {_Delivered, StateN1} = deliver_or_enqueue(Delivery, StateN), diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index e2f9b020..b05d6973 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -37,7 +37,7 @@ backing_queue, backing_queue_state, set_delivered, - seen + seen_status }). 
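%% -- Editor's note (not part of the original patch): seen_status is the
%% promoted slave's guid_status map, i.e. Guid -> {published, ChPid} |
%% {confirmed, ChPid}. It lets the new master skip re-publishing messages
%% it already applied via GM while it was a slave, and emit any confirms
%% whose msg_seq_no only becomes known after promotion.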
%% --------------------------------------------------------------------------- @@ -70,15 +70,15 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> backing_queue = BQ, backing_queue_state = BQS, set_delivered = 0, - seen = sets:new() }. + seen_status = dict:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM, Seen) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, set_delivered = BQ:len(BQS), - seen = Seen }. + seen_status = SeenStatus }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but @@ -102,29 +102,61 @@ purge(State = #state { gm = GM, set_delivered = 0 }}. publish(Msg = #basic_message { guid = Guid }, MsgProps, ChPid, - State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - seen = Seen }) -> - case sets:is_element(Guid, Seen) of - true -> State #state { seen = sets:del_element(Guid, Seen) }; - false -> ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), - BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State #state { backing_queue_state = BQS1 } - end. + State = #state { gm = GM, + backing_queue = BQ }) -> + {ok, State1} = + maybe_publish( + fun (BQS) -> + ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), + {ok, BQ:publish(Msg, MsgProps, ChPid, BQS)} + end, State), + State1. publish_delivered(AckRequired, Msg = #basic_message { guid = Guid }, MsgProps, - ChPid, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - seen = Seen }) -> - case sets:is_element(Guid, Seen) of - true -> State #state { seen = sets:del_element(Guid, Seen) }; - false -> ok = gm:broadcast(GM, {publish, {true, AckRequired}, ChPid, - MsgProps, Msg}), - {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, - MsgProps, ChPid, BQS), - {AckTag, State #state { backing_queue_state = BQS1 }} + ChPid, State = #state { gm = GM, + backing_queue = BQ }) -> + case maybe_publish( + fun (BQS) -> + ok = gm:broadcast(GM, {publish, {true, AckRequired}, ChPid, + MsgProps, Msg}), + BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS) + end, State) of + {ok, State1} -> + %% publish_delivered but we've already published this + %% message. This means that we received the msg when we + %% were a slave but only via GM, not from the + %% channel. + %% + %% If AckRequired then we would have requeued the message + %% upon our promotion to master. Astonishingly, we think + %% we're empty, which means that someone else has already + %% consumed the message post requeue, and now we're about + %% to send it to another consumer. This could not be more + %% wrong. + +maybe_publish(Fun, State = #state { seen_status = SS, + backing_queue_state = BQS }) -> + %% We will never see {published, ChPid, MsgSeqNo} here. + case dict:find(Guid, SS) of + error -> + {Result, BQS1} = Fun(BQS), + {Result, State #state { backing_queue_state = BQS1 }}; + {ok, {published, ChPid}} -> + %% It already got published when we were a slave and no + %% confirmation is waiting. amqqueue_process will have + %% recorded if there's a confirm due to arrive, so can + %% delete entry. + {ok, State #state { seen_status = dict:erase(Guid, SS) }}; + {ok, {confirmed, ChPid}} -> + %% It got confirmed before we became master, but we've + %% only just received the publish from the channel, so + %% couldn't previously know what the msg_seq_no was. Thus + %% confirm now. 
amqqueue_process will have recorded a + %% confirm is due immediately prior to here (and thus _it_ + %% knows the guid -> msg_seq_no mapping). + ok = rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + self(), ?MODULE, fun (State1) -> {[Guid], State1} end), + {ok, State #state { seen_status = dict:erase(Guid, SS) }} end. dropwhile(Fun, State = #state { gm = GM, @@ -143,7 +175,7 @@ fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, set_delivered = SetDelivered, - seen = Seen }) -> + seen_status = SS }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), State1 = State #state { backing_queue_state = BQS1 }, case Result of @@ -154,13 +186,13 @@ fetch(AckRequired, State = #state { gm = GM, ok = gm:broadcast(GM, {fetch, AckRequired, Guid, Remaining}), IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), - Seen1 = case SetDelivered + SetDelivered1 of - 1 -> sets:new(); %% transition to empty - _ -> Seen - end, + SS1 = case SetDelivered + SetDelivered1 of + 1 -> dict:new(); %% transition to empty + _ -> SS + end, {{Message, IsDelivered1, AckTag, Remaining}, State1 #state { set_delivered = SetDelivered1, - seen = Seen1 }} + seen_status = SS1 }} end. ack(AckTags, State = #state { gm = GM, diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 93f885ba..5cdae16c 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -133,6 +133,8 @@ handle_call({gm_deaths, Deaths}, From, master_node = MNode }) -> rabbit_log:info("Slave ~p saw deaths ~p for ~s~n", [self(), Deaths, rabbit_misc:rs(QueueName)]), + %% The GM has told us about deaths, which means we're not going to + %% receive any more messages from GM case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= MNode -> reply(ok, State); @@ -332,28 +334,31 @@ promote_me(From, #state { q = Q, backing_queue_state = BQS, rate_timer_ref = RateTRef, sender_queues = SQ, - guid_ack = GA }) -> + guid_ack = GA, + guid_status = GS }) -> rabbit_log:info("Promoting slave ~p for ~s~n", [self(), rabbit_misc:rs(Q #amqqueue.name)]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), - %% TODO fix up seen MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, sets:new()), + CPid, BQ, BQS, GM, GS), %% We have to do the requeue via this init because otherwise we %% don't have access to the relevent MsgPropsFun. Also, we are %% already in mnesia as the master queue pid. Thus we cannot just %% publish stuff by sending it to ourself - we must pass it %% through to this init, otherwise we can violate ordering %% constraints. + GTC = dict:from_list( + [{Guid, {ChPid, MsgSeqNo}} || + {Guid, {published, ChPid, MsgSeqNo}} <- dict:to_list(GS)]), AckTags = [AckTag || {_Guid, AckTag} <- dict:to_list(GA)], Deliveries = lists:append([queue:to_list(PubQ) || {_ChPid, PubQ} <- dict:to_list(SQ)]), QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( Q, rabbit_mirror_queue_master, MasterState, RateTRef, - AckTags, Deliveries), + AckTags, Deliveries, GTC), {become, rabbit_amqqueue_process, QueueState, hibernate}. 
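%% -- Editor's note (not part of the original patch): on promotion the
%% slave's guid_status is used two ways. Entries that already carry a
%% msg_seq_no seed the new master's guid_to_channel map (the extra GTC
%% argument to init_with_backing_queue_state/7), while the map as a whole
%% is passed to promote_backing_queue_state/5 as seen_status so that
%% messages already applied via GM are not published to the backing queue
%% a second time.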
noreply(State) -> -- cgit v1.2.1 From 6a88269b83e0e93d50e7e65435c9daeef0fc7ddb Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 2 Mar 2011 21:52:31 +0000 Subject: Always specify rabbit application when looking up ssl_options --- src/rabbit_networking.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index c0cb78f5..53be0190 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -139,7 +139,7 @@ resolve_family(_, F) -> F. ensure_ssl() -> ok = rabbit_misc:start_applications([crypto, public_key, ssl]), - {ok, SslOptsConfig} = application:get_env(ssl_options), + {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options), % unknown_ca errors are silently ignored prior to R14B unless we % supply this verify_fun - remove when at least R14B is required -- cgit v1.2.1 From 5ac968c2f7a20f0b7b9da54c0ec72057b36abfd7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:05:41 +0000 Subject: Remove unused var --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d0c984cb..58a28d32 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,7 +510,7 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { guid = Guid }, - MsgProps = #message_properties { + #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of -- cgit v1.2.1 From 912fd5c0df7a52e99e5c8386c4f3d9894b324f46 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:21:43 +0000 Subject: renaming --- src/rabbit_alarm.erl | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 365a5ed2..9ce468f0 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -25,7 +25,7 @@ -export([remote_conserve_memory/2]). %% Internal use only --record(alarms, {alertees, high_watermarks}). +-record(alarms, {alertees, alarmed_nodes}). %%---------------------------------------------------------------------------- @@ -78,27 +78,27 @@ remote_conserve_memory(Pid, Conserve) -> %%---------------------------------------------------------------------------- init([]) -> - {ok, #alarms{alertees = dict:new(), - high_watermarks = sets:new()}}. + {ok, #alarms{alertees = dict:new(), + alarmed_nodes = sets:new()}}. handle_call({register, Pid, HighMemMFA}, State) -> - {ok, 0 < sets:size(State#alarms.high_watermarks), + {ok, 0 < sets:size(State#alarms.alarmed_nodes), internal_register(Pid, HighMemMFA, State)}; handle_call(_Request, State) -> {ok, not_understood, State}. 
handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:add_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, true), - {ok, State#alarms{high_watermarks = Highs1}}; + State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:add_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, true), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:del_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), - {ok, State#alarms{high_watermarks = Highs1}}; + State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:del_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node, up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. @@ -107,10 +107,10 @@ handle_event({node, up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node, down, Node}, State = #alarms{high_watermarks = Highs}) -> - Highs1 = sets:del_element(Node, Highs), - ok = maybe_alert(Highs, Highs1, State#alarms.alertees, Node, false), - {ok, State#alarms{high_watermarks = Highs1}}; +handle_event({node, down, Node}, State = #alarms{alarmed_nodes = AN}) -> + AN1 = sets:del_element(Node, AN), + ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> {ok, internal_register(Pid, HighMemMFA, State)}; @@ -173,7 +173,7 @@ alert(Alert, Alertees, NodeComparator) -> internal_register(Pid, {M, F, A} = HighMemMFA, State = #alarms{alertees = Alertees}) -> _MRef = erlang:monitor(process, Pid), - ok = case sets:is_element(node(), State#alarms.high_watermarks) of + ok = case sets:is_element(node(), State#alarms.alarmed_nodes) of true -> apply(M, F, A ++ [Pid, true]); false -> ok end, -- cgit v1.2.1 From a8253808e91b19dff6c7bb2b399a04f75005ee7f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:24:17 +0000 Subject: dafter renaming --- src/rabbit_alarm.erl | 14 ++++++++------ src/rabbit_node_monitor.erl | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 9ce468f0..82c921a2 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -18,7 +18,7 @@ -behaviour(gen_event). --export([start/0, stop/0, register/2, on_node/2]). +-export([start/0, stop/0, register/2, on_node_up/1, on_node_down/1]). -export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2, code_change/3]). @@ -35,7 +35,8 @@ -spec(start/0 :: () -> 'ok'). -spec(stop/0 :: () -> 'ok'). -spec(register/2 :: (pid(), mfa_tuple()) -> boolean()). --spec(on_node/2 :: ('up'|'down', node()) -> 'ok'). +-spec(on_node_up/1 :: (node()) -> 'ok'). +-spec(on_node_down/1 :: (node()) -> 'ok'). -endif. @@ -59,8 +60,9 @@ register(Pid, HighMemMFA) -> {register, Pid, HighMemMFA}, infinity). -on_node(Action, Node) -> - gen_event:notify(alarm_handler, {node, Action, Node}). +on_node_up(Node) -> gen_event:notify(alarm_handler, {node_up, Node}). + +on_node_down(Node) -> gen_event:notify(alarm_handler, {node_down, Node}). 
remote_conserve_memory(Pid, Conserve) -> RemoteNode = node(Pid), @@ -100,14 +102,14 @@ handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; -handle_event({node, up, Node}, State) -> +handle_event({node_up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. ok = gen_event:notify( {alarm_handler, Node}, {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node, down, Node}, State = #alarms{alarmed_nodes = AN}) -> +handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN}) -> AN1 = sets:del_element(Node, AN), ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 061f628d..1917c12c 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -69,7 +69,7 @@ handle_call(_Request, _From, State) -> handle_cast({rabbit_running_on, Node}, State) -> rabbit_log:info("node ~p up~n", [Node]), erlang:monitor(process, {rabbit, Node}), - ok = rabbit_alarm:on_node(up, Node), + ok = rabbit_alarm:on_node_up(Node), {noreply, State}; handle_cast(_Msg, State) -> {noreply, State}. @@ -99,4 +99,4 @@ code_change(_OldVsn, State, _Extra) -> handle_dead_rabbit(Node) -> ok = rabbit_networking:on_node_down(Node), ok = rabbit_amqqueue:on_node_down(Node), - ok = rabbit_alarm:on_node(down, Node). + ok = rabbit_alarm:on_node_down(Node). -- cgit v1.2.1 From dbaf1c2d62ecc348996e752c2b81ac684f3e00e0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:25:54 +0000 Subject: shrink code --- src/rabbit_alarm.erl | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 82c921a2..62c1cc74 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -64,18 +64,14 @@ on_node_up(Node) -> gen_event:notify(alarm_handler, {node_up, Node}). on_node_down(Node) -> gen_event:notify(alarm_handler, {node_down, Node}). -remote_conserve_memory(Pid, Conserve) -> - RemoteNode = node(Pid), - %% Can't use alarm_handler:{set,clear}_alarm because that doesn't - %% permit notifying a remote node. - case Conserve of - true -> gen_event:notify( - {alarm_handler, RemoteNode}, - {set_alarm, {{vm_memory_high_watermark, node()}, []}}); - false -> gen_event:notify( - {alarm_handler, RemoteNode}, - {clear_alarm, {vm_memory_high_watermark, node()}}) - end. +%% Can't use alarm_handler:{set,clear}_alarm because that doesn't +%% permit notifying a remote node. +remote_conserve_memory(Pid, true) -> + gen_event:notify({alarm_handler, node(Pid)}, + {set_alarm, {{vm_memory_high_watermark, node()}, []}}); +remote_conserve_memory(Pid, false) -> + gen_event:notify({alarm_handler, node(Pid)}, + {clear_alarm, {vm_memory_high_watermark, node()}}). 
%%---------------------------------------------------------------------------- -- cgit v1.2.1 From 63aa5b839ab9e30281c5a0cef9f45354e14374d9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:41:45 +0000 Subject: move code around and refactor a bit --- src/rabbit_alarm.erl | 56 ++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 62c1cc74..a4914c0b 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -87,15 +87,17 @@ handle_call(_Request, State) -> {ok, not_understood, State}. handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{alarmed_nodes = AN}) -> + State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:add_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, true), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, true), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{alarmed_nodes = AN}) -> + State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node_up, Node}, State) -> @@ -105,9 +107,10 @@ handle_event({node_up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN}) -> +handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(AN, AN1, State#alarms.alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> @@ -117,8 +120,8 @@ handle_event(_Event, State) -> {ok, State}. handle_info({'DOWN', _MRef, process, Pid, _Reason}, - State = #alarms{alertees = Alertess}) -> - {ok, State#alarms{alertees = dict:erase(Pid, Alertess)}}; + State = #alarms{alertees = Alertees}) -> + {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}}; handle_info(_Info, State) -> {ok, State}. @@ -131,26 +134,23 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- -maybe_alert(Before, After, Alertees, AlarmNode, Action) - when AlarmNode =:= node() -> - %% If we have changed our alarm state, always inform the remotes. - case {sets:is_element(AlarmNode, Before), sets:is_element(AlarmNode, After), - Action} of - {false, true, true} -> alert_remote(Action, Alertees); - {true, false, false} -> alert_remote(Action, Alertees); - _ -> ok - end, - maybe_alert_local(Before, After, Alertees, Action); -maybe_alert(Before, After, Alertees, _AlarmNode, Action) -> - maybe_alert_local(Before, After, Alertees, Action). - -maybe_alert_local(Before, After, Alertees, Action) -> - %% If the overall alarm state has changed, inform the locals. - case {sets:size(Before), sets:size(After), Action} of - {0, 1, true} -> alert_local(Action, Alertees); - {1, 0, false} -> alert_local(Action, Alertees); - _ -> ok - end. 
+maybe_alert(BeforeSize, AfterSize, Alertees, AlarmNode, Action) -> + ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, + AlarmNode =:= node(), Action), + ok = maybe_alert_local(BeforeSize, AfterSize, Alertees, Action). + +%% If we have changed our alarm state, always inform the remotes. +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, true) + when BeforeSize < AfterSize -> alert_remote(true, Alertees); +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, false) + when BeforeSize > AfterSize -> alert_remote(false, Alertees); +maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode, _Action) -> + ok. + +%% If the overall alarm state has changed, inform the locals. +maybe_alert_local(0, 1, Alertees, true ) -> alert_local(true, Alertees); +maybe_alert_local(1, 0, Alertees, false ) -> alert_local(false, Alertees); +maybe_alert_local(_, _, _Alertees, _Action) -> ok. alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). -- cgit v1.2.1 From 4a3a2daaed541572a5ae37a950f14964645305f1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 3 Mar 2011 15:49:42 +0000 Subject: avoid tautology --- src/rabbit_alarm.erl | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index a4914c0b..508da5ee 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -90,14 +90,14 @@ handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:add_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, true), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({node_up, Node}, State) -> @@ -110,7 +110,7 @@ handle_event({node_up, Node}, State) -> handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node, false), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), {ok, State#alarms{alarmed_nodes = AN1}}; handle_event({register, Pid, HighMemMFA}, State) -> @@ -134,23 +134,22 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- -maybe_alert(BeforeSize, AfterSize, Alertees, AlarmNode, Action) -> - ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, - AlarmNode =:= node(), Action), - ok = maybe_alert_local(BeforeSize, AfterSize, Alertees, Action). +maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> + ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), + ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). %% If we have changed our alarm state, always inform the remotes. 
-maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, true) +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) when BeforeSize < AfterSize -> alert_remote(true, Alertees); -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true, false) +maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) when BeforeSize > AfterSize -> alert_remote(false, Alertees); -maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode, _Action) -> +maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode) -> ok. %% If the overall alarm state has changed, inform the locals. -maybe_alert_local(0, 1, Alertees, true ) -> alert_local(true, Alertees); -maybe_alert_local(1, 0, Alertees, false ) -> alert_local(false, Alertees); -maybe_alert_local(_, _, _Alertees, _Action) -> ok. +maybe_alert_local(0, 1, Alertees) -> alert_local(true, Alertees); +maybe_alert_local(1, 0, Alertees) -> alert_local(false, Alertees); +maybe_alert_local(_, _, _Alertees) -> ok. alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). -- cgit v1.2.1 From ffd695bed82709c57064fcaf46606b607e474140 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:11:07 +0000 Subject: simplify --- src/rabbit_alarm.erl | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 508da5ee..34cc4d3c 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -86,19 +86,11 @@ handle_call({register, Pid, HighMemMFA}, State) -> handle_call(_Request, State) -> {ok, not_understood, State}. -handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, - State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:add_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({set_alarm, {{vm_memory_high_watermark, Node}, []}}, State) -> + {ok, maybe_alert(fun sets:add_element/2, Node, State)}; -handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, - State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({clear_alarm, {vm_memory_high_watermark, Node}}, State) -> + {ok, maybe_alert(fun sets:del_element/2, Node, State)}; handle_event({node_up, Node}, State) -> %% Must do this via notify and not call to avoid possible deadlock. @@ -107,11 +99,8 @@ handle_event({node_up, Node}, State) -> {register, self(), {?MODULE, remote_conserve_memory, []}}), {ok, State}; -handle_event({node_down, Node}, State = #alarms{alarmed_nodes = AN, - alertees = Alertees}) -> - AN1 = sets:del_element(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), - {ok, State#alarms{alarmed_nodes = AN1}}; +handle_event({node_down, Node}, State) -> + {ok, maybe_alert(fun sets:del_element/2, Node, State)}; handle_event({register, Pid, HighMemMFA}, State) -> {ok, internal_register(Pid, HighMemMFA, State)}; @@ -134,6 +123,12 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- +maybe_alert(SetFun, Node, State = #alarms{alarmed_nodes = AN, + alertees = Alertees}) -> + AN1 = SetFun(Node, AN), + ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), + State#alarms{alarmed_nodes = AN1}. 
+ maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). -- cgit v1.2.1 From 2570ed2e55fb40e2853652a5e94719ddb2a9e78e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:29:11 +0000 Subject: yet more simplification --- src/rabbit_alarm.erl | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 34cc4d3c..9f88d8da 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -126,31 +126,25 @@ code_change(_OldVsn, State, _Extra) -> maybe_alert(SetFun, Node, State = #alarms{alarmed_nodes = AN, alertees = Alertees}) -> AN1 = SetFun(Node, AN), - ok = maybe_alert(sets:size(AN), sets:size(AN1), Alertees, Node), + BeforeSz = sets:size(AN), + AfterSz = sets:size(AN1), + %% If we have changed our alarm state, inform the remotes. + IsLocal = Node =:= node(), + if IsLocal andalso BeforeSz < AfterSz -> ok = alert_remote(true, Alertees); + IsLocal andalso BeforeSz > AfterSz -> ok = alert_remote(false, Alertees); + true -> ok + end, + %% If the overall alarm state has changed, inform the locals. + case {BeforeSz, AfterSz} of + {0, 1} -> ok = alert_local(true, Alertees); + {1, 0} -> ok = alert_local(false, Alertees); + {_, _} -> ok + end, State#alarms{alarmed_nodes = AN1}. -maybe_alert(BeforeSize, AfterSize, Alertees, AlmNde) -> - ok = maybe_alert_remote(BeforeSize, AfterSize, Alertees, AlmNde =:= node()), - ok = maybe_alert_local(BeforeSize, AfterSize, Alertees). +alert_local(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=:='/2). -%% If we have changed our alarm state, always inform the remotes. -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) - when BeforeSize < AfterSize -> alert_remote(true, Alertees); -maybe_alert_remote(BeforeSize, AfterSize, Alertees, true) - when BeforeSize > AfterSize -> alert_remote(false, Alertees); -maybe_alert_remote(_BeforeSize, _AfterSize, _Alertees, _IsLocalNode) -> - ok. - -%% If the overall alarm state has changed, inform the locals. -maybe_alert_local(0, 1, Alertees) -> alert_local(true, Alertees); -maybe_alert_local(1, 0, Alertees) -> alert_local(false, Alertees); -maybe_alert_local(_, _, _Alertees) -> ok. - -alert_local(Alert, Alertees) -> - alert(Alert, Alertees, fun erlang:'=:='/2). - -alert_remote(Alert, Alertees) -> - alert(Alert, Alertees, fun erlang:'=/='/2). +alert_remote(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=/='/2). alert(Alert, Alertees, NodeComparator) -> Node = node(), -- cgit v1.2.1 From 976787bbbaf1ebbae5e7c620f8b8ae40f55afd71 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 08:34:11 +0000 Subject: cosmetic --- src/rabbit_alarm.erl | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl index 9f88d8da..d38ecb91 100644 --- a/src/rabbit_alarm.erl +++ b/src/rabbit_alarm.erl @@ -148,20 +148,19 @@ alert_remote(Alert, Alertees) -> alert(Alert, Alertees, fun erlang:'=/='/2). alert(Alert, Alertees, NodeComparator) -> Node = node(), - dict:fold(fun (Pid, {M, F, A}, Acc) -> + dict:fold(fun (Pid, {M, F, A}, ok) -> case NodeComparator(Node, node(Pid)) of - true -> ok = erlang:apply(M, F, A ++ [Pid, Alert]), - Acc; - false -> Acc + true -> apply(M, F, A ++ [Pid, Alert]); + false -> ok end end, ok, Alertees). 
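
%% [Editorial note - not part of the patch: a minimal, self-contained sketch
%% of the alert fan-out above. It uses a plain list of {Pid, {M, F, A}} pairs
%% instead of the real dict-based alertees structure; the node comparator is
%% what distinguishes alert_local (fun erlang:'=:='/2) from alert_remote
%% (fun erlang:'=/='/2). The function name is illustrative only.]
alert_sketch(Alert, Alertees, NodeComparator) ->
    Node = node(),
    _ = [apply(M, F, A ++ [Pid, Alert])
         || {Pid, {M, F, A}} <- Alertees, NodeComparator(Node, node(Pid))],
    ok.
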
internal_register(Pid, {M, F, A} = HighMemMFA, State = #alarms{alertees = Alertees}) -> _MRef = erlang:monitor(process, Pid), - ok = case sets:is_element(node(), State#alarms.alarmed_nodes) of - true -> apply(M, F, A ++ [Pid, true]); - false -> ok - end, + case sets:is_element(node(), State#alarms.alarmed_nodes) of + true -> ok = apply(M, F, A ++ [Pid, true]); + false -> ok + end, NewAlertees = dict:store(Pid, HighMemMFA, Alertees), State#alarms{alertees = NewAlertees}. -- cgit v1.2.1 From c5b8dbd5f5526a815620f5d7385040b7fc91c4c3 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 4 Mar 2011 10:41:47 +0000 Subject: Reworked binding delete into single transaction again --- src/rabbit_exchange_type_topic.erl | 84 ++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 31 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 25cdcc31..08e16661 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -68,19 +68,56 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, _X, Bs) -> - ToDelete = - lists:foldl(fun(B = #binding{source = X, destination = D}, Acc) -> - [{FinalNode, _} | _] = binding_path(B), - [{X, FinalNode, D} | Acc] - end, [], Bs), + {ToDelete, Paths} = + lists:foldl( + fun(B = #binding{source = X, destination = D}, {Acc, PathAcc}) -> + Path = [{FinalNode, _} | _] = binding_path(B), + PathAcc1 = decrement_bindings(X, Path, maybe_add_path( + X, Path, PathAcc)), + {[{X, FinalNode, D} | Acc], PathAcc1} + end, {[], gb_trees:empty()}, Bs), + [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], + [trie_remove_edge(X, Parent, Node, W) || + {{X, [{Node, W}, {Parent, _} | _ ]}, {0, 0}} + <- gb_trees:to_list(Paths)], ok; -remove_bindings(false, _X, Bs) -> - [rabbit_misc:execute_mnesia_transaction( - fun() -> remove_path_if_empty(X, binding_path(B)) end) - || B = #binding{source = X} <- Bs], +remove_bindings(false, _X, _Bs) -> ok. +maybe_add_path(_X, [{root, none}], PathAcc) -> + PathAcc; +maybe_add_path(X, Path, PathAcc) -> + case gb_trees:is_defined({X, Path}, PathAcc) of + true -> PathAcc; + false -> gb_trees:insert({X, Path}, counts(X, Path), PathAcc) + end. + +decrement_bindings(X, Path, PathAcc) -> + with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, + X, Path, PathAcc). + +decrement_edges(X, Path, PathAcc) -> + with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, + X, Path, PathAcc). + +with_path_acc(_Fun, _X, [{root, none}], PathAcc) -> + PathAcc; +with_path_acc(Fun, X, Path, PathAcc) -> + NewVal = Fun(gb_trees:get({X, Path}, PathAcc)), + NewPathAcc = gb_trees:update({X, Path}, NewVal, PathAcc), + case NewVal of + {0, 0} -> + [_ | ParentPath] = Path, + decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); + _ -> + NewPathAcc + end. + +counts(X, [{FinalNode, _} | _]) -> + {trie_binding_count(X, FinalNode), trie_child_count(X, FinalNode)}. + binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -151,15 +188,6 @@ follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) -> error -> {error, Acc, Words} end. -remove_path_if_empty(_, [{root, none}]) -> - ok; -remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) -> - case trie_has_any_bindings(X, Node) orelse trie_has_any_children(X, Node) of - true -> ok; - false -> trie_remove_edge(X, Parent, Node, W), - remove_path_if_empty(X, RestPath) - end. 
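
%% [Editorial note - not part of the patch: the gb_trees idiom that
%% maybe_add_path/with_path_acc above rely on - insert a value the first time
%% a key is seen, update it in place on later visits. The helper name is
%% illustrative only.]
insert_or_update(Key, Initial, UpdateFun, Tree) ->
    case gb_trees:is_defined(Key, Tree) of
        true  -> gb_trees:update(Key, UpdateFun(gb_trees:get(Key, Tree)), Tree);
        false -> gb_trees:insert(Key, Initial, Tree)
    end.
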
- trie_child(X, Node, Word) -> case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, @@ -204,21 +232,24 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_has_any_children(X, Node) -> - has_any(rabbit_topic_trie_edge, +trie_child_count(X, Node) -> + count(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, node_id = Node, _ = '_'}, _ = '_'}). -trie_has_any_bindings(X, Node) -> - has_any(rabbit_topic_trie_binding, +trie_binding_count(X, Node) -> + count(rabbit_topic_trie_binding, #topic_trie_binding{ trie_binding = #trie_binding{exchange_name = X, node_id = Node, _ = '_'}, _ = '_'}). +count(Table, Match) -> + length(mnesia:match_object(Table, Match, read)). + trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, @@ -231,15 +262,6 @@ trie_remove_all_bindings(X) -> trie_binding = #trie_binding{exchange_name = X, _ = '_'}, _ = '_'}). -has_any(Table, MatchHead) -> - Select = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read), - select_while_no_result(Select) /= '$end_of_table'. - -select_while_no_result({[], Cont}) -> - select_while_no_result(mnesia:select(Cont)); -select_while_no_result(Other) -> - Other. - remove_all(Table, Pattern) -> lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end, mnesia:match_object(Table, Pattern, write)). -- cgit v1.2.1 From b6058d0b1bef5c5f9eddff225ff2accc70eea086 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 15:41:25 +0000 Subject: beginnings of decoupling bq from amqqueue - parameterise bq with callbacks --- include/rabbit_backing_queue_spec.hrl | 6 +- src/rabbit_amqqueue_process.erl | 18 +++- src/rabbit_backing_queue.erl | 2 +- src/rabbit_tests.erl | 16 ++-- src/rabbit_variable_queue.erl | 162 ++++++++++++++++++---------------- 5 files changed, 114 insertions(+), 90 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index accb2c0e..2e4d1b0a 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -25,11 +25,13 @@ -type(message_properties_transformer() :: fun ((rabbit_types:message_properties()) -> rabbit_types:message_properties())). +-type(async_callback() :: fun ((fun ((state()) -> state())) -> 'ok')). +-type(sync_callback() :: fun ((fun ((state()) -> state())) -> 'ok' | 'error')). -spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(init/3 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery()) -> - state()). +-spec(init/5 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery(), + async_callback(), sync_callback()) -> state()). -spec(terminate/1 :: (state()) -> state()). -spec(delete_and_terminate/1 :: (state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..cf2a3949 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -149,7 +149,7 @@ declare(Recover, From, ok = rabbit_memory_monitor:register( self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), - BQS = BQ:init(QName, IsDurable, Recover), + BQS = bq_init(BQ, QName, IsDurable, Recover), State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), @@ -159,6 +159,20 @@ declare(Recover, From, Q1 -> {stop, normal, {existing, Q1}, State} end. 
+bq_init(BQ, QName, IsDurable, Recover) -> + Self = self(), + BQ:init(QName, IsDurable, Recover, + fun (Fun) -> + rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + Self, Fun) + end, + fun (Fun) -> + rabbit_misc:with_exit_handler( + fun () -> error end, + fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( + Self, Fun) end) + end). + process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> lists:foldl(fun({Arg, Fun}, State1) -> case rabbit_misc:table_lookup(Arguments, Arg) of @@ -797,7 +811,7 @@ handle_call({init, Recover}, From, _ -> rabbit_log:warning( "Queue ~p exclusive owner went away~n", [QName]) end, - BQS = BQ:init(QName, IsDurable, Recover), + BQS = bq_init(BQ, QName, IsDurable, Recover), %% Rely on terminate to delete the queue. {stop, normal, State#q{backing_queue_state = BQS}} end; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 6a21e10f..a8e201ea 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -33,7 +33,7 @@ behaviour_info(callbacks) -> {stop, 0}, %% Initialise the backing queue and its state. - {init, 3}, + {init, 5}, %% Called on queue shutdown when queue isn't being deleted. {terminate, 1}, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..99bb1c4b 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2003,6 +2003,10 @@ test_queue_index() -> passed. +variable_queue_init(QName, IsDurable, Recover) -> + rabbit_variable_queue:init(QName, IsDurable, Recover, + fun nop/1, fun nop/1, fun nop/2, fun nop/1). + variable_queue_publish(IsPersistent, Count, VQ) -> lists:foldl( fun (_N, VQN) -> @@ -2033,8 +2037,7 @@ assert_props(List, PropVals) -> with_fresh_variable_queue(Fun) -> ok = empty_test_queue(), - VQ = rabbit_variable_queue:init(test_queue(), true, false, - fun nop/2, fun nop/1), + VQ = variable_queue_init(test_queue(), true, false), S0 = rabbit_variable_queue:status(VQ), assert_props(S0, [{q1, 0}, {q2, 0}, {delta, {delta, undefined, 0, undefined}}, @@ -2209,8 +2212,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true, - fun nop/2, fun nop/1), + VQ7 = variable_queue_init(test_queue(), true, true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), VQ9 = variable_queue_publish(false, 1, VQ8), @@ -2226,8 +2228,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:idle_timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = rabbit_variable_queue:init(test_queue(), true, true, - fun nop/2, fun nop/1), + VQ7 = variable_queue_init(test_queue(), true, true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. 
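
%% [Editorial note - not part of the patch: an illustrative sketch of the two
%% callbacks that bq_init above hands to the backing queue. Both take a
%% fun(BQState) -> BQState; the async one always returns 'ok', the sync one
%% may return 'error' when the queue process is no longer reachable, matching
%% the async_callback()/sync_callback() types added to the spec header. The
%% message tags and helper name here are made up for the example.]
demo_callbacks(QPid) ->
    Async = fun (Fun) -> QPid ! {run_backing_queue_async, Fun}, ok end,
    Sync  = fun (Fun) -> case is_process_alive(QPid) of
                             true  -> QPid ! {run_backing_queue, Fun}, ok;
                             false -> error
                         end
            end,
    {Async, Sync}.
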
@@ -2258,8 +2259,7 @@ test_queue_recover() -> {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = rabbit_amqqueue:basic_get(Q1, self(), false), exit(QPid1, shutdown), - VQ1 = rabbit_variable_queue:init(QName, true, true, - fun nop/2, fun nop/1), + VQ1 = variable_queue_init(QName, true, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..7f702409 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,7 +16,7 @@ -module(rabbit_variable_queue). --export([init/3, terminate/1, delete_and_terminate/1, +-export([init/5, terminate/1, delete_and_terminate/1, purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, @@ -27,7 +27,7 @@ -export([start/1, stop/0]). %% exported for testing only --export([start_msg_store/2, stop_msg_store/0, init/5]). +-export([start_msg_store/2, stop_msg_store/0, init/7]). %%---------------------------------------------------------------------------- %% Definitions: @@ -238,6 +238,9 @@ durable, transient_threshold, + async_callback, + sync_callback, + len, persistent_count, @@ -332,11 +335,14 @@ {any(), binary()}}, on_sync :: sync(), durable :: boolean(), + transient_threshold :: non_neg_integer(), + + async_callback :: async_callback(), + sync_callback :: sync_callback(), len :: non_neg_integer(), persistent_count :: non_neg_integer(), - transient_threshold :: non_neg_integer(), target_ram_count :: non_neg_integer() | 'infinity', ram_msg_count :: non_neg_integer(), ram_msg_count_prev :: non_neg_integer(), @@ -397,25 +403,26 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). -init(QueueName, IsDurable, Recover) -> - Self = self(), - init(QueueName, IsDurable, Recover, +init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback) -> + init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback, fun (Guids, ActionTaken) -> - msgs_written_to_disk(Self, Guids, ActionTaken) + msgs_written_to_disk(AsyncCallback, Guids, ActionTaken) end, - fun (Guids) -> msg_indices_written_to_disk(Self, Guids) end). + fun (Guids) -> msg_indices_written_to_disk(AsyncCallback, Guids) end). 
-init(QueueName, IsDurable, false, MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(QueueName, IsDurable, false, AsyncCallback, SyncCallback, + MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), - init(IsDurable, IndexState, 0, [], + init(IsDurable, IndexState, 0, [], AsyncCallback, SyncCallback, case IsDurable of true -> msg_store_client_init(?PERSISTENT_MSG_STORE, - MsgOnDiskFun); + MsgOnDiskFun, AsyncCallback); false -> undefined end, - msg_store_client_init(?TRANSIENT_MSG_STORE, undefined)); + msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback)); -init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(QueueName, true, true, AsyncCallback, SyncCallback, + MsgOnDiskFun, MsgIdxOnDiskFun) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of @@ -425,9 +432,9 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> _ -> {rabbit_guid:guid(), rabbit_guid:guid(), []} end, PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef, - MsgOnDiskFun), + MsgOnDiskFun, AsyncCallback), TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE, TRef, - undefined), + undefined, AsyncCallback), {DeltaCount, IndexState} = rabbit_queue_index:recover( QueueName, Terms1, @@ -437,7 +444,7 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> end, MsgIdxOnDiskFun), init(true, IndexState, DeltaCount, Terms1, - PersistentClient, TransientClient). + PersistentClient, TransientClient, AsyncCallback, SyncCallback). terminate(State) -> State1 = #vqstate { persistent_count = PCount, @@ -512,9 +519,9 @@ publish(Msg, MsgProps, State) -> publish_delivered(false, #basic_message { guid = Guid }, #message_properties { needs_confirming = NeedsConfirming }, - State = #vqstate { len = 0 }) -> + State = #vqstate { async_callback = Callback, len = 0 }) -> case NeedsConfirming of - true -> blind_confirm(self(), gb_sets:singleton(Guid)); + true -> blind_confirm(Callback, gb_sets:singleton(Guid)); false -> ok end, {undefined, a(State)}; @@ -685,6 +692,8 @@ tx_rollback(Txn, State = #vqstate { durable = IsDurable, tx_commit(Txn, Fun, MsgPropsFun, State = #vqstate { durable = IsDurable, + async_callback = AsyncCallback, + sync_callback = SyncCallback, msg_store_clients = MSCState }) -> #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), @@ -696,7 +705,8 @@ tx_commit(Txn, Fun, MsgPropsFun, true -> ok = msg_store_sync( MSCState, true, PersistentGuids, msg_store_callback(PersistentGuids, Pubs, AckTags1, - Fun, MsgPropsFun)), + Fun, MsgPropsFun, + AsyncCallback, SyncCallback)), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, Fun, MsgPropsFun, State) @@ -929,13 +939,13 @@ with_immutable_msg_store_state(MSCState, IsPersistent, Fun) -> end), Res. -msg_store_client_init(MsgStore, MsgOnDiskFun) -> - msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun). +msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> + msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun, Callback). -msg_store_client_init(MsgStore, Ref, MsgOnDiskFun) -> +msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> rabbit_msg_store:client_init( MsgStore, Ref, MsgOnDiskFun, - msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE)). + msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE, Callback)). 
msg_store_write(MSCState, IsPersistent, Guid, Msg) -> with_immutable_msg_store_state( @@ -967,16 +977,13 @@ msg_store_close_fds(MSCState, IsPersistent) -> MSCState, IsPersistent, fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). -msg_store_close_fds_fun(IsPersistent) -> - Self = self(), - fun () -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - Self, - fun (State = #vqstate { msg_store_clients = MSCState }) -> - {ok, MSCState1} = - msg_store_close_fds(MSCState, IsPersistent), - {[], State #vqstate { msg_store_clients = MSCState1 }} - end) +msg_store_close_fds_fun(IsPersistent, Callback) -> + fun () -> Callback( + fun (State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = + msg_store_close_fds(MSCState, IsPersistent), + {[], State #vqstate { msg_store_clients = MSCState1 }} + end) end. maybe_write_delivered(false, _SeqId, IndexState) -> @@ -1062,7 +1069,7 @@ update_rate(Now, Then, Count, {OThen, OCount}) -> %%---------------------------------------------------------------------------- init(IsDurable, IndexState, DeltaCount, Terms, - PersistentClient, TransientClient) -> + PersistentClient, TransientClient, AsyncCallback, SyncCallback) -> {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), @@ -1088,6 +1095,9 @@ init(IsDurable, IndexState, DeltaCount, Terms, durable = IsDurable, transient_threshold = NextSeqId, + async_callback = AsyncCallback, + sync_callback = SyncCallback, + len = DeltaCount1, persistent_count = DeltaCount1, @@ -1114,23 +1124,24 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. -msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> - Self = self(), - F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( - Self, fun (StateN) -> {[], tx_commit_post_msg_store( - true, Pubs, AckTags, - Fun, MsgPropsFun, StateN)} - end) - end, - fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( - fun () -> remove_persistent_messages( - PersistentGuids) - end, F) +msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun, + AsyncCallback, SyncCallback) -> + fun () -> spawn(fun () -> case SyncCallback( + fun (StateN) -> + tx_commit_post_msg_store( + true, Pubs, AckTags, + Fun, MsgPropsFun, StateN) + end) of + ok -> ok; + error -> remove_persistent_messages( + PersistentGuids, AsyncCallback) + end end) end. -remove_persistent_messages(Guids) -> - PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, undefined), +remove_persistent_messages(Guids, AsyncCallback) -> + PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, + undefined, AsyncCallback), ok = rabbit_msg_store:remove(Guids, PersistentClient), rabbit_msg_store:client_delete_and_terminate(PersistentClient). @@ -1442,35 +1453,32 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, msgs_confirmed(GuidSet, State) -> {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. -blind_confirm(QPid, GuidSet) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State) -> msgs_confirmed(GuidSet, State) end). 
- -msgs_written_to_disk(QPid, GuidSet, removed) -> - blind_confirm(QPid, GuidSet); -msgs_written_to_disk(QPid, GuidSet, written) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), - State #vqstate { - msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) - end). - -msg_indices_written_to_disk(QPid, GuidSet) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), - State #vqstate { - msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) - end). +blind_confirm(Callback, GuidSet) -> + Callback(fun (State) -> msgs_confirmed(GuidSet, State) end). + +msgs_written_to_disk(Callback, GuidSet, removed) -> + blind_confirm(Callback, GuidSet); +msgs_written_to_disk(Callback, GuidSet, written) -> + Callback(fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) + end). + +msg_indices_written_to_disk(Callback, GuidSet) -> + Callback(fun (State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> + msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) + end). %%---------------------------------------------------------------------------- %% Phase changes -- cgit v1.2.1 From 55494a8fe0850e22c57609e41f6c525a80064991 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 4 Mar 2011 17:16:32 +0000 Subject: compromise renaming --- src/rabbit_channel.erl | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index fe6522fe..77960e45 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,8 +298,10 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, State = #ch{consumer_monitors = ConsumerMonitors}) -> noreply( case dict:find(MRef, ConsumerMonitors) of - error -> handle_queue_down(QPid, Reason, State); - {ok, ConsumerTag} -> handle_consumer_down(MRef, ConsumerTag, State) + error -> + handle_publishing_queue_down(QPid, Reason, State); + {ok, ConsumerTag} -> + handle_consuming_queue_down(MRef, ConsumerTag, State) end). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -1103,7 +1105,7 @@ monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping, State end. -handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> +handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> MsgSeqNos = case gb_trees:lookup(QPid, UQM) of {value, MsgSet} -> gb_sets:to_list(MsgSet); none -> [] @@ -1120,10 +1122,10 @@ handle_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> end)(MXs, State2), queue_blocked(QPid, State3). 
-handle_consumer_down(MRef, ConsumerTag, - State = #ch{consumer_mapping = ConsumerMapping, - consumer_monitors = ConsumerMonitors, - writer_pid = WriterPid}) -> +handle_consuming_queue_down(MRef, ConsumerTag, + State = #ch{consumer_mapping = ConsumerMapping, + consumer_monitors = ConsumerMonitors, + writer_pid = WriterPid}) -> ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping), ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors), Cancel = #'basic.cancel'{consumer_tag = ConsumerTag, -- cgit v1.2.1 From b155306db41afb224a90bd20f142700c42a97efc Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:22:18 +0000 Subject: introduce separate type for msg ids and add some auxiliary types for fun params to a) make their purpose clearer, and b) work around emacs indentation bugs --- include/rabbit_msg_store_index.hrl | 8 ++++---- src/rabbit_amqqueue.erl | 4 ++-- src/rabbit_msg_file.erl | 12 +++++++----- src/rabbit_msg_store.erl | 22 +++++++++++----------- src/rabbit_queue_index.erl | 21 ++++++++++----------- src/rabbit_types.erl | 5 +++-- 6 files changed, 37 insertions(+), 35 deletions(-) diff --git a/include/rabbit_msg_store_index.hrl b/include/rabbit_msg_store_index.hrl index 289f8f60..2ae5b000 100644 --- a/include/rabbit_msg_store_index.hrl +++ b/include/rabbit_msg_store_index.hrl @@ -29,13 +29,13 @@ -spec(new/1 :: (dir()) -> index_state()). -spec(recover/1 :: (dir()) -> rabbit_types:ok_or_error2(index_state(), any())). -spec(lookup/2 :: - (rabbit_guid:guid(), index_state()) -> ('not_found' | keyvalue())). + (rabbit_types:msg_id(), index_state()) -> ('not_found' | keyvalue())). -spec(insert/2 :: (keyvalue(), index_state()) -> 'ok'). -spec(update/2 :: (keyvalue(), index_state()) -> 'ok'). --spec(update_fields/3 :: (rabbit_guid:guid(), ({fieldpos(), fieldvalue()} | - [{fieldpos(), fieldvalue()}]), +-spec(update_fields/3 :: (rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} | + [{fieldpos(), fieldvalue()}]), index_state()) -> 'ok'). --spec(delete/2 :: (rabbit_guid:guid(), index_state()) -> 'ok'). +-spec(delete/2 :: (rabbit_types:msg_id(), index_state()) -> 'ok'). -spec(delete_object/2 :: (keyvalue(), index_state()) -> 'ok'). -spec(delete_by_file/2 :: (fieldvalue(), index_state()) -> 'ok'). -spec(terminate/1 :: (index_state()) -> any()). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 46b78c39..bbeff1f7 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -141,9 +141,9 @@ fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). -spec(maybe_run_queue_via_backing_queue/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). + (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(maybe_run_queue_via_backing_queue_async/2 :: - (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). + (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 55e6ac47..71b4aa6f 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -39,15 +39,17 @@ -type(position() :: non_neg_integer()). -type(msg_size() :: non_neg_integer()). -type(file_size() :: non_neg_integer()). +-type(message_accumulator(A) :: + fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) -> + A)). 
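
%% [Editorial note - not part of the patch: a message_accumulator(A), as
%% defined above, is just a fold fun for rabbit_msg_file:scan/4. This
%% illustrative one collects every msg id seen during a scan.]
collect_msg_ids_fun() ->
    fun ({MsgId, _TotalSize, _Offset, _MsgBody}, Acc) -> [MsgId | Acc] end.
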
--spec(append/3 :: (io_device(), rabbit_guid:guid(), msg()) -> +-spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) -> rabbit_types:ok_or_error2(msg_size(), any())). -spec(read/2 :: (io_device(), msg_size()) -> - rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, + rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()}, any())). --spec(scan/4 :: (io_device(), file_size(), - fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), - A) -> {'ok', A, position()}). +-spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) -> + {'ok', A, position()}). -endif. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 9e65e442..02811da7 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -132,30 +132,30 @@ file_summary_ets :: ets:tid(), dedup_cache_ets :: ets:tid(), cur_file_cache_ets :: ets:tid()}). --type(startup_fun_state() :: - {(fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A})), - A}). +-type(msg_ref_delta_gen(A) :: + fun ((A) -> 'finished' | + {rabbit_types:msg_id(), non_neg_integer(), A})). -type(maybe_guid_fun() :: 'undefined' | fun ((gb_set()) -> any())). -type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')). -type(deletion_thunk() :: fun (() -> boolean())). -spec(start_link/4 :: (atom(), file:filename(), [binary()] | 'undefined', - startup_fun_state()) -> rabbit_types:ok_pid_or_error()). + {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()). -spec(successfully_recovered_state/1 :: (server()) -> boolean()). -spec(client_init/4 :: (server(), client_ref(), maybe_guid_fun(), maybe_close_fds_fun()) -> client_msstate()). -spec(client_terminate/1 :: (client_msstate()) -> 'ok'). -spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok'). -spec(client_ref/1 :: (client_msstate()) -> client_ref()). --spec(write/3 :: (rabbit_guid:guid(), msg(), client_msstate()) -> 'ok'). --spec(read/2 :: (rabbit_guid:guid(), client_msstate()) -> +-spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). +-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) -> {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). --spec(contains/2 :: (rabbit_guid:guid(), client_msstate()) -> boolean()). --spec(remove/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). --spec(release/2 :: ([rabbit_guid:guid()], client_msstate()) -> 'ok'). --spec(sync/3 :: ([rabbit_guid:guid()], fun (() -> any()), client_msstate()) -> - 'ok'). +-spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). +-spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). +-spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). +-spec(sync/3 :: + ([rabbit_types:msg_id()], fun (() -> any()), client_msstate()) -> 'ok'). -spec(sync/1 :: (server()) -> 'ok'). -spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok'). diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76b1136f..7b5aa120 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -187,21 +187,21 @@ dirty_count :: integer(), max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), - unsynced_guids :: [rabbit_guid:guid()] - }). --type(startup_fun_state() :: - {fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A}), - A}). + unsynced_guids :: [rabbit_types:msg_id()] + }). +-type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). 
+-type(walker(A) :: fun ((A) -> 'finished' | + {rabbit_types:msg_id(), non_neg_integer(), A})). -type(shutdown_terms() :: [any()]). -spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()). -spec(shutdown_terms/1 :: (rabbit_amqqueue:name()) -> shutdown_terms()). -spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(), - fun ((rabbit_guid:guid()) -> boolean()), on_sync_fun()) -> - {'undefined' | non_neg_integer(), qistate()}). + contains_predicate(), on_sync_fun()) -> + {'undefined' | non_neg_integer(), qistate()}). -spec(terminate/2 :: ([any()], qistate()) -> qistate()). -spec(delete_and_terminate/1 :: (qistate()) -> qistate()). --spec(publish/5 :: (rabbit_guid:guid(), seq_id(), +-spec(publish/5 :: (rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), boolean(), qistate()) -> qistate()). -spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()). @@ -209,14 +209,13 @@ -spec(sync/2 :: ([seq_id()], qistate()) -> qistate()). -spec(flush/1 :: (qistate()) -> qistate()). -spec(read/3 :: (seq_id(), seq_id(), qistate()) -> - {[{rabbit_guid:guid(), seq_id(), + {[{rabbit_types:msg_id(), seq_id(), rabbit_types:message_properties(), boolean(), boolean()}], qistate()}). -spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). -spec(bounds/1 :: (qistate()) -> {non_neg_integer(), non_neg_integer(), qistate()}). --spec(recover/1 :: ([rabbit_amqqueue:name()]) -> - {[[any()]], startup_fun_state()}). +-spec(recover/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}). -spec(add_queue_ttl/0 :: () -> 'ok'). diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index ab2300c0..899291f2 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -21,7 +21,7 @@ -ifdef(use_specs). -export_type([txn/0, maybe/1, info/0, infos/0, info_key/0, info_keys/0, - message/0, basic_message/0, + message/0, msg_id/0, basic_message/0, delivery/0, content/0, decoded_content/0, undecoded_content/0, unencoded_content/0, encoded_content/0, message_properties/0, vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0, @@ -62,11 +62,12 @@ properties_bin :: binary(), payload_fragments_rev :: [binary()]}). -type(content() :: undecoded_content() | decoded_content()). +-type(msg_id() :: rabbit_guid:guid()). -type(basic_message() :: #basic_message{exchange_name :: rabbit_exchange:name(), routing_keys :: [rabbit_router:routing_key()], content :: content(), - guid :: rabbit_guid:guid(), + guid :: msg_id(), is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: -- cgit v1.2.1 From c0304ad94f0862f6cae9d33dac434144b17ea309 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:46:50 +0000 Subject: guid -> msg_id in msg_store and friends --- include/rabbit_msg_store.hrl | 3 +- src/rabbit_msg_file.erl | 31 ++-- src/rabbit_msg_store.erl | 341 +++++++++++++++++++------------------ src/rabbit_msg_store_ets_index.erl | 2 +- 4 files changed, 190 insertions(+), 187 deletions(-) diff --git a/include/rabbit_msg_store.hrl b/include/rabbit_msg_store.hrl index 9d704f65..e9150a97 100644 --- a/include/rabbit_msg_store.hrl +++ b/include/rabbit_msg_store.hrl @@ -22,5 +22,4 @@ -endif. --record(msg_location, - {guid, ref_count, file, offset, total_size}). +-record(msg_location, {msg_id, ref_count, file, offset, total_size}). diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 71b4aa6f..22ad3d05 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -27,8 +27,8 @@ -define(WRITE_OK_SIZE_BITS, 8). 
 -define(WRITE_OK_MARKER, 255).
 -define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
--define(GUID_SIZE_BYTES, 16).
--define(GUID_SIZE_BITS, (8 * ?GUID_SIZE_BYTES)).
+-define(MSG_ID_SIZE_BYTES, 16).
+-define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)).
 -define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
 
 %%----------------------------------------------------------------------------
@@ -55,14 +55,14 @@
 
 %%----------------------------------------------------------------------------
 
-append(FileHdl, Guid, MsgBody)
-  when is_binary(Guid) andalso size(Guid) =:= ?GUID_SIZE_BYTES ->
+append(FileHdl, MsgId, MsgBody)
+  when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES ->
     MsgBodyBin = term_to_binary(MsgBody),
     MsgBodyBinSize = size(MsgBodyBin),
-    Size = MsgBodyBinSize + ?GUID_SIZE_BYTES,
+    Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES,
     case file_handle_cache:append(FileHdl,
-                                  <<Size:?INTEGER_SIZE_BITS,
-                                    Guid:?GUID_SIZE_BYTES/binary,
-                                    MsgBodyBin:MsgBodyBinSize/binary,
-                                    ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
+                                  <<Size:?INTEGER_SIZE_BITS,
+                                    MsgId:?MSG_ID_SIZE_BYTES/binary,
+                                    MsgBodyBin:MsgBodyBinSize/binary,
+                                    ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
         ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
@@ -71,13 +71,13 @@ append(FileHdl, Guid, MsgBody)
 
 read(FileHdl, TotalSize) ->
     Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
-    BodyBinSize = Size - ?GUID_SIZE_BYTES,
+    BodyBinSize = Size - ?MSG_ID_SIZE_BYTES,
     case file_handle_cache:read(FileHdl, TotalSize) of
-        {ok, <<Size:?INTEGER_SIZE_BITS,
-               Guid:?GUID_SIZE_BYTES/binary,
-               MsgBodyBin:BodyBinSize/binary,
-               ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
-            {ok, {Guid, binary_to_term(MsgBodyBin)}};
+        {ok, <<Size:?INTEGER_SIZE_BITS,
+               MsgId:?MSG_ID_SIZE_BYTES/binary,
+               MsgBodyBin:BodyBinSize/binary,
+               ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
+            {ok, {MsgId, binary_to_term(MsgBodyBin)}};
         KO -> KO
     end.
 
@@ -102,21 +102,22 @@ scanner(<<>>, Offset, _Fun, Acc) ->
     {<<>>, Acc, Offset};
 scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) ->
     {<<>>, Acc, Offset}; %% Nothing to do other than stop.
-scanner(<<Size:?INTEGER_SIZE_BITS, GuidAndMsg:Size/binary,
-          WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
+scanner(<<Size:?INTEGER_SIZE_BITS, MsgIdAndMsg:Size/binary,
+          WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
     TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
     case WriteMarker of
         ?WRITE_OK_MARKER ->
             %% Here we take option 5 from
             %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
-            %% which we read the Guid as a number, and then convert it
+            %% which we read the MsgId as a number, and then convert it
             %% back to a binary in order to work around bugs in
             %% Erlang's GC.
-            <<GuidNum:?GUID_SIZE_BITS, Msg/binary>> =
-                <<GuidAndMsg:Size/binary>>,
-            <<Guid:?GUID_SIZE_BYTES/binary>> = <<GuidNum:?GUID_SIZE_BITS>>,
+            <<MsgIdNum:?MSG_ID_SIZE_BITS, Msg/binary>> =
+                <<MsgIdAndMsg:Size/binary>>,
+            <<MsgId:?MSG_ID_SIZE_BYTES/binary>> =
+                <<MsgIdNum:?MSG_ID_SIZE_BITS>>,
             scanner(Rest, Offset + TotalSize, Fun,
-                    Fun({Guid, TotalSize, Offset, Msg}, Acc));
+                    Fun({MsgId, TotalSize, Offset, Msg}, Acc));
         _ ->
             scanner(Rest, Offset + TotalSize, Fun, Acc)
     end;
diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl
index 02811da7..771835a1 100644
--- a/src/rabbit_msg_store.erl
+++ b/src/rabbit_msg_store.erl
@@ -74,7 +74,7 @@
               %% to callbacks
               successfully_recovered, %% boolean: did we recover state?
               file_size_limit,        %% how big are our files allowed to get?
-              cref_to_guids           %% client ref to synced messages mapping
+              cref_to_msg_ids         %% client ref to synced messages mapping
             }).
 
 -record(client_msstate,
@@ -135,7 +135,7 @@
 -type(msg_ref_delta_gen(A) ::
         fun ((A) -> 'finished' |
                     {rabbit_types:msg_id(), non_neg_integer(), A})).
--type(maybe_guid_fun() :: 'undefined' | fun ((gb_set()) -> any())).
+-type(maybe_msg_id_fun() :: 'undefined' | fun ((gb_set()) -> any())).
 -type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
 -type(deletion_thunk() :: fun (() -> boolean())).
 
@@ -143,7 +143,7 @@
         (atom(), file:filename(), [binary()] | 'undefined',
          {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()).
 -spec(successfully_recovered_state/1 :: (server()) -> boolean()).
--spec(client_init/4 :: (server(), client_ref(), maybe_guid_fun(),
+-spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(),
                         maybe_close_fds_fun()) -> client_msstate()).
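
%% [Editorial note - not part of the patch: the arithmetic behind
%% ?FILE_PACKING_ADJUSTMENT in rabbit_msg_file above. Assuming the usual
%% 8-byte size prefix (?INTEGER_SIZE_BYTES = 8, not shown in this hunk), a
%% message whose term-encoded body is N bytes occupies
%% N + 16 (msg id) + 8 (size prefix) + 1 (write-ok marker) bytes on disk.]
total_size_on_disk(MsgBodyBinSize) ->
    MsgBodyBinSize + 16 + 8 + 1.
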
@@ -177,8 +177,8 @@ %% The components: %% -%% Index: this is a mapping from Guid to #msg_location{}: -%% {Guid, RefCount, File, Offset, TotalSize} +%% Index: this is a mapping from MsgId to #msg_location{}: +%% {MsgId, RefCount, File, Offset, TotalSize} %% By default, it's in ets, but it's also pluggable. %% FileSummary: this is an ets table which maps File to #file_summary{}: %% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers} @@ -279,7 +279,7 @@ %% alternating full files and files with only one tiny message in %% them). %% -%% Messages are reference-counted. When a message with the same guid +%% Messages are reference-counted. When a message with the same msg id %% is written several times we only store it once, and only remove it %% from the store when it has been removed the same number of times. %% @@ -422,29 +422,29 @@ client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) -> client_ref(#client_msstate { client_ref = Ref }) -> Ref. -write(Guid, Msg, +write(MsgId, Msg, CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts, client_ref = CRef }) -> - ok = update_msg_cache(CurFileCacheEts, Guid, Msg), - ok = server_cast(CState, {write, CRef, Guid}). + ok = update_msg_cache(CurFileCacheEts, MsgId, Msg), + ok = server_cast(CState, {write, CRef, MsgId}). -read(Guid, +read(MsgId, CState = #client_msstate { dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }) -> %% 1. Check the dedup cache - case fetch_and_increment_cache(DedupCacheEts, Guid) of + case fetch_and_increment_cache(DedupCacheEts, MsgId) of not_found -> %% 2. Check the cur file cache - case ets:lookup(CurFileCacheEts, Guid) of + case ets:lookup(CurFileCacheEts, MsgId) of [] -> Defer = fun() -> - {server_call(CState, {read, Guid}), CState} + {server_call(CState, {read, MsgId}), CState} end, - case index_lookup_positive_ref_count(Guid, CState) of + case index_lookup_positive_ref_count(MsgId, CState) of not_found -> Defer(); MsgLocation -> client_read1(MsgLocation, Defer, CState) end; - [{Guid, Msg, _CacheRefCount}] -> + [{MsgId, Msg, _CacheRefCount}] -> %% Although we've found it, we don't know the %% refcount, so can't insert into dedup cache {{ok, Msg}, CState} @@ -453,13 +453,13 @@ read(Guid, {{ok, Msg}, CState} end. -contains(Guid, CState) -> server_call(CState, {contains, Guid}). +contains(MsgId, CState) -> server_call(CState, {contains, MsgId}). remove([], _CState) -> ok; -remove(Guids, CState = #client_msstate { client_ref = CRef }) -> - server_cast(CState, {remove, CRef, Guids}). +remove(MsgIds, CState = #client_msstate { client_ref = CRef }) -> + server_cast(CState, {remove, CRef, MsgIds}). release([], _CState) -> ok; -release(Guids, CState) -> server_cast(CState, {release, Guids}). -sync(Guids, K, CState) -> server_cast(CState, {sync, Guids, K}). +release(MsgIds, CState) -> server_cast(CState, {release, MsgIds}). +sync(MsgIds, K, CState) -> server_cast(CState, {sync, MsgIds, K}). sync(Server) -> gen_server2:cast(Server, sync). @@ -477,11 +477,11 @@ server_call(#client_msstate { server = Server }, Msg) -> server_cast(#client_msstate { server = Server }, Msg) -> gen_server2:cast(Server, Msg). -client_read1(#msg_location { guid = Guid, file = File } = MsgLocation, Defer, +client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer, CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> case ets:lookup(FileSummaryEts, File) of [] -> %% File has been GC'd and no longer exists. Go around again. 
- read(Guid, CState); + read(MsgId, CState); [#file_summary { locked = Locked, right = Right }] -> client_read2(Locked, Right, MsgLocation, Defer, CState) end. @@ -503,7 +503,7 @@ client_read2(true, _Right, _MsgLocation, Defer, _CState) -> %% the safest and simplest thing to do. Defer(); client_read2(false, _Right, - MsgLocation = #msg_location { guid = Guid, file = File }, + MsgLocation = #msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_summary_ets = FileSummaryEts }) -> %% It's entirely possible that everything we're doing from here on @@ -512,9 +512,9 @@ client_read2(false, _Right, safe_ets_update_counter( FileSummaryEts, File, {#file_summary.readers, +1}, fun (_) -> client_read3(MsgLocation, Defer, CState) end, - fun () -> read(Guid, CState) end). + fun () -> read(MsgId, CState) end). -client_read3(#msg_location { guid = Guid, file = File }, Defer, +client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts, @@ -539,7 +539,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% too). case ets:lookup(FileSummaryEts, File) of [] -> %% GC has deleted our file, just go round again. - read(Guid, CState); + read(MsgId, CState); [#file_summary { locked = true }] -> %% If we get a badarg here, then the GC has finished and %% deleted our file. Try going around again. Otherwise, @@ -550,7 +550,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% unlocks the dest) try Release(), Defer() - catch error:badarg -> read(Guid, CState) + catch error:badarg -> read(MsgId, CState) end; [#file_summary { locked = false }] -> %% Ok, we're definitely safe to continue - a GC involving @@ -563,7 +563,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% us doing the lookup and the +1 on the readers. (Same as %% badarg scenario above, but we don't have a missing file %% - we just have the /wrong/ file). - case index_lookup(Guid, CState) of + case index_lookup(MsgId, CState) of #msg_location { file = File } = MsgLocation -> %% Still the same file. {ok, CState1} = close_all_indicated(CState), @@ -589,9 +589,9 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, end end. -clear_client(CRef, State = #msstate { cref_to_guids = CTG, +clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM, dying_clients = DyingClients }) -> - State #msstate { cref_to_guids = dict:erase(CRef, CTG), + State #msstate { cref_to_msg_ids = dict:erase(CRef, CTM), dying_clients = sets:del_element(CRef, DyingClients) }. @@ -666,7 +666,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> clients = Clients, successfully_recovered = CleanShutdown, file_size_limit = FileSizeLimit, - cref_to_guids = dict:new() + cref_to_msg_ids = dict:new() }, %% If we didn't recover the msg location index then we need to @@ -698,7 +698,7 @@ prioritise_call(Msg, _From, _State) -> case Msg of successfully_recovered_state -> 7; {new_client_state, _Ref, _MODC, _CloseFDsFun} -> 7; - {read, _Guid} -> 2; + {read, _MsgId} -> 2; _ -> 0 end. 
@@ -733,12 +733,12 @@ handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, handle_call({client_terminate, CRef}, _From, State) -> reply(ok, clear_client(CRef, State)); -handle_call({read, Guid}, From, State) -> - State1 = read_message(Guid, From, State), +handle_call({read, MsgId}, From, State) -> + State1 = read_message(MsgId, From, State), noreply(State1); -handle_call({contains, Guid}, From, State) -> - State1 = contains_message(Guid, From, State), +handle_call({contains, MsgId}, From, State) -> + State1 = contains_message(MsgId, From, State), noreply(State1). handle_cast({client_dying, CRef}, @@ -751,53 +751,53 @@ handle_cast({client_delete, CRef}, State = #msstate { clients = Clients }) -> State1 = State #msstate { clients = dict:erase(CRef, Clients) }, noreply(remove_message(CRef, CRef, clear_client(CRef, State1))); -handle_cast({write, CRef, Guid}, +handle_cast({write, CRef, MsgId}, State = #msstate { cur_file_cache_ets = CurFileCacheEts }) -> - true = 0 =< ets:update_counter(CurFileCacheEts, Guid, {3, -1}), - [{Guid, Msg, _CacheRefCount}] = ets:lookup(CurFileCacheEts, Guid), + true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}), + [{MsgId, Msg, _CacheRefCount}] = ets:lookup(CurFileCacheEts, MsgId), noreply( - case write_action(should_mask_action(CRef, Guid, State), Guid, State) of + case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of {write, State1} -> - write_message(CRef, Guid, Msg, State1); + write_message(CRef, MsgId, Msg, State1); {ignore, CurFile, State1 = #msstate { current_file = CurFile }} -> State1; {ignore, _File, State1} -> - true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}), State1; {confirm, CurFile, State1 = #msstate { current_file = CurFile }}-> - record_pending_confirm(CRef, Guid, State1); + record_pending_confirm(CRef, MsgId, State1); {confirm, _File, State1} -> - true = ets:delete_object(CurFileCacheEts, {Guid, Msg, 0}), + true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}), update_pending_confirms( - fun (MsgOnDiskFun, CTG) -> - MsgOnDiskFun(gb_sets:singleton(Guid), written), - CTG + fun (MsgOnDiskFun, CTM) -> + MsgOnDiskFun(gb_sets:singleton(MsgId), written), + CTM end, CRef, State1) end); -handle_cast({remove, CRef, Guids}, State) -> +handle_cast({remove, CRef, MsgIds}, State) -> State1 = lists:foldl( - fun (Guid, State2) -> remove_message(Guid, CRef, State2) end, - State, Guids), - noreply(maybe_compact( - client_confirm(CRef, gb_sets:from_list(Guids), removed, State1))); + fun (MsgId, State2) -> remove_message(MsgId, CRef, State2) end, + State, MsgIds), + noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(MsgIds), + removed, State1))); -handle_cast({release, Guids}, State = +handle_cast({release, MsgIds}, State = #msstate { dedup_cache_ets = DedupCacheEts }) -> lists:foreach( - fun (Guid) -> decrement_cache(DedupCacheEts, Guid) end, Guids), + fun (MsgId) -> decrement_cache(DedupCacheEts, MsgId) end, MsgIds), noreply(State); -handle_cast({sync, Guids, K}, +handle_cast({sync, MsgIds, K}, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, on_sync = Syncs }) -> {ok, SyncOffset} = file_handle_cache:last_sync_offset(CurHdl), - case lists:any(fun (Guid) -> + case lists:any(fun (MsgId) -> #msg_location { file = File, offset = Offset } = - index_lookup(Guid, State), + index_lookup(MsgId, State), File =:= CurFile andalso Offset >= SyncOffset - end, Guids) of + end, MsgIds) of false -> K(), noreply(State); 
true -> noreply(State #msstate { on_sync = [K | Syncs] }) @@ -879,16 +879,16 @@ reply(Reply, State) -> {State1, Timeout} = next_state(State), {reply, Reply, State1, Timeout}. -next_state(State = #msstate { sync_timer_ref = undefined, - on_sync = Syncs, - cref_to_guids = CTG }) -> - case {Syncs, dict:size(CTG)} of +next_state(State = #msstate { sync_timer_ref = undefined, + on_sync = Syncs, + cref_to_msg_ids = CTM }) -> + case {Syncs, dict:size(CTM)} of {[], 0} -> {State, hibernate}; _ -> {start_sync_timer(State), 0} end; -next_state(State = #msstate { on_sync = Syncs, - cref_to_guids = CTG }) -> - case {Syncs, dict:size(CTG)} of +next_state(State = #msstate { on_sync = Syncs, + cref_to_msg_ids = CTM }) -> + case {Syncs, dict:size(CTM)} of {[], 0} -> {stop_sync_timer(State), hibernate}; _ -> {State, 0} end. @@ -905,66 +905,66 @@ stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> internal_sync(State = #msstate { current_file_handle = CurHdl, on_sync = Syncs, - cref_to_guids = CTG }) -> + cref_to_msg_ids = CTM }) -> State1 = stop_sync_timer(State), - CGs = dict:fold(fun (CRef, Guids, NS) -> - case gb_sets:is_empty(Guids) of + CGs = dict:fold(fun (CRef, MsgIds, NS) -> + case gb_sets:is_empty(MsgIds) of true -> NS; - false -> [{CRef, Guids} | NS] + false -> [{CRef, MsgIds} | NS] end - end, [], CTG), + end, [], CTM), case {Syncs, CGs} of {[], []} -> ok; _ -> file_handle_cache:sync(CurHdl) end, [K() || K <- lists:reverse(Syncs)], - [client_confirm(CRef, Guids, written, State1) || {CRef, Guids} <- CGs], - State1 #msstate { cref_to_guids = dict:new(), on_sync = [] }. + [client_confirm(CRef, MsgIds, written, State1) || {CRef, MsgIds} <- CGs], + State1 #msstate { cref_to_msg_ids = dict:new(), on_sync = [] }. -write_action({true, not_found}, _Guid, State) -> +write_action({true, not_found}, _MsgId, State) -> {ignore, undefined, State}; -write_action({true, #msg_location { file = File }}, _Guid, State) -> +write_action({true, #msg_location { file = File }}, _MsgId, State) -> {ignore, File, State}; -write_action({false, not_found}, _Guid, State) -> +write_action({false, not_found}, _MsgId, State) -> {write, State}; write_action({Mask, #msg_location { ref_count = 0, file = File, total_size = TotalSize }}, - Guid, State = #msstate { file_summary_ets = FileSummaryEts }) -> + MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) -> case {Mask, ets:lookup(FileSummaryEts, File)} of {false, [#file_summary { locked = true }]} -> - ok = index_delete(Guid, State), + ok = index_delete(MsgId, State), {write, State}; {false_if_increment, [#file_summary { locked = true }]} -> - %% The msg for Guid is older than the client death + %% The msg for MsgId is older than the client death %% message, but as it is being GC'd currently we'll have %% to write a new copy, which will then be younger, so %% ignore this write. {ignore, File, State}; {_Mask, [#file_summary {}]} -> - ok = index_update_ref_count(Guid, 1, State), + ok = index_update_ref_count(MsgId, 1, State), State1 = adjust_valid_total_size(File, TotalSize, State), {confirm, File, State1} end; write_action({_Mask, #msg_location { ref_count = RefCount, file = File }}, - Guid, State) -> - ok = index_update_ref_count(Guid, RefCount + 1, State), + MsgId, State) -> + ok = index_update_ref_count(MsgId, RefCount + 1, State), %% We already know about it, just update counter. Only update %% field otherwise bad interaction with concurrent GC {confirm, File, State}. 
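
%% [Editorial note - not part of the patch: the rule the write path above
%% implements. A repeated write of a msg id already present in the index never
%% stores a second copy on disk; only the reference count is bumped, and the
%% message is removed from disk once remove/2 has brought the count back to
%% zero. Record fields as in rabbit_msg_store.hrl; the function name is
%% illustrative only.]
on_repeat_write(#msg_location { ref_count = RefCount } = Loc) ->
    Loc#msg_location { ref_count = RefCount + 1 }.
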
-write_message(CRef, Guid, Msg, State) -> - write_message(Guid, Msg, record_pending_confirm(CRef, Guid, State)). +write_message(CRef, MsgId, Msg, State) -> + write_message(MsgId, Msg, record_pending_confirm(CRef, MsgId, State)). -write_message(Guid, Msg, +write_message(MsgId, Msg, State = #msstate { current_file_handle = CurHdl, current_file = CurFile, sum_valid_data = SumValid, sum_file_size = SumFileSize, file_summary_ets = FileSummaryEts }) -> {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl), - {ok, TotalSize} = rabbit_msg_file:append(CurHdl, Guid, Msg), + {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg), ok = index_insert( - #msg_location { guid = Guid, ref_count = 1, file = CurFile, + #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile, offset = CurOffset, total_size = TotalSize }, State), [#file_summary { right = undefined, locked = false }] = ets:lookup(FileSummaryEts, CurFile), @@ -976,21 +976,21 @@ write_message(Guid, Msg, sum_valid_data = SumValid + TotalSize, sum_file_size = SumFileSize + TotalSize }). -read_message(Guid, From, +read_message(MsgId, From, State = #msstate { dedup_cache_ets = DedupCacheEts }) -> - case index_lookup_positive_ref_count(Guid, State) of + case index_lookup_positive_ref_count(MsgId, State) of not_found -> gen_server2:reply(From, not_found), State; MsgLocation -> - case fetch_and_increment_cache(DedupCacheEts, Guid) of + case fetch_and_increment_cache(DedupCacheEts, MsgId) of not_found -> read_message1(From, MsgLocation, State); Msg -> gen_server2:reply(From, {ok, Msg}), State end end. -read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, +read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, file = File, offset = Offset } = MsgLoc, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, @@ -1000,7 +1000,7 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, case File =:= CurFile of true -> {Msg, State1} = %% can return [] if msg in file existed on startup - case ets:lookup(CurFileCacheEts, Guid) of + case ets:lookup(CurFileCacheEts, MsgId) of [] -> {ok, RawOffSet} = file_handle_cache:current_raw_offset(CurHdl), @@ -1009,9 +1009,9 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, false -> ok end, read_from_disk(MsgLoc, State, DedupCacheEts); - [{Guid, Msg1, _CacheRefCount}] -> + [{MsgId, Msg1, _CacheRefCount}] -> ok = maybe_insert_into_cache( - DedupCacheEts, RefCount, Guid, Msg1), + DedupCacheEts, RefCount, MsgId, Msg1), {Msg1, State} end, gen_server2:reply(From, {ok, Msg}), @@ -1019,7 +1019,7 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, false -> [#file_summary { locked = Locked }] = ets:lookup(FileSummaryEts, File), case Locked of - true -> add_to_pending_gc_completion({read, Guid, From}, + true -> add_to_pending_gc_completion({read, MsgId, From}, File, State); false -> {Msg, State1} = read_from_disk(MsgLoc, State, DedupCacheEts), @@ -1028,47 +1028,47 @@ read_message1(From, #msg_location { guid = Guid, ref_count = RefCount, end end. 
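
%% [Editorial note - not part of the patch: an illustrative sketch of the two
%% ets caches consulted by the read path above before falling back to a read
%% from disk - the dedup cache, then (for messages still in the file being
%% written) the current-file cache. Table layouts and names are illustrative.]
where_is_it(MsgId, DedupCacheEts, CurFileCacheEts) ->
    case ets:lookup(DedupCacheEts, MsgId) of
        [{MsgId, Msg, _Count}] -> {dedup_cache, Msg};
        [] -> case ets:lookup(CurFileCacheEts, MsgId) of
                  [{MsgId, Msg, _RefCount}] -> {cur_file_cache, Msg};
                  []                        -> must_read_from_disk
              end
    end.
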
-read_from_disk(#msg_location { guid = Guid, ref_count = RefCount, +read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, file = File, offset = Offset, total_size = TotalSize }, State, DedupCacheEts) -> {Hdl, State1} = get_read_handle(File, State), {ok, Offset} = file_handle_cache:position(Hdl, Offset), - {ok, {Guid, Msg}} = + {ok, {MsgId, Msg}} = case rabbit_msg_file:read(Hdl, TotalSize) of - {ok, {Guid, _}} = Obj -> + {ok, {MsgId, _}} = Obj -> Obj; Rest -> {error, {misread, [{old_state, State}, {file_num, File}, {offset, Offset}, - {guid, Guid}, + {msg_id, MsgId}, {read, Rest}, {proc_dict, get()} ]}} end, - ok = maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg), + ok = maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg), {Msg, State1}. -contains_message(Guid, From, +contains_message(MsgId, From, State = #msstate { pending_gc_completion = Pending }) -> - case index_lookup_positive_ref_count(Guid, State) of + case index_lookup_positive_ref_count(MsgId, State) of not_found -> gen_server2:reply(From, false), State; #msg_location { file = File } -> case orddict:is_key(File, Pending) of true -> add_to_pending_gc_completion( - {contains, Guid, From}, File, State); + {contains, MsgId, From}, File, State); false -> gen_server2:reply(From, true), State end end. -remove_message(Guid, CRef, +remove_message(MsgId, CRef, State = #msstate { file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts }) -> - case should_mask_action(CRef, Guid, State) of + case should_mask_action(CRef, MsgId, State) of {true, _Location} -> State; {false_if_increment, #msg_location { ref_count = 0 }} -> @@ -1081,24 +1081,25 @@ remove_message(Guid, CRef, total_size = TotalSize }} when RefCount > 0 -> %% only update field, otherwise bad interaction with %% concurrent GC - Dec = - fun () -> index_update_ref_count(Guid, RefCount - 1, State) end, + Dec = fun () -> + index_update_ref_count(MsgId, RefCount - 1, State) + end, case RefCount of %% don't remove from CUR_FILE_CACHE_ETS_NAME here %% because there may be further writes in the mailbox %% for the same msg. - 1 -> ok = remove_cache_entry(DedupCacheEts, Guid), + 1 -> ok = remove_cache_entry(DedupCacheEts, MsgId), case ets:lookup(FileSummaryEts, File) of [#file_summary { locked = true }] -> add_to_pending_gc_completion( - {remove, Guid, CRef}, File, State); + {remove, MsgId, CRef}, File, State); [#file_summary {}] -> ok = Dec(), delete_file_if_empty( File, adjust_valid_total_size(File, -TotalSize, State)) end; - _ -> ok = decrement_cache(DedupCacheEts, Guid), + _ -> ok = decrement_cache(DedupCacheEts, MsgId), ok = Dec(), State end @@ -1119,12 +1120,12 @@ run_pending(Files, State) -> lists:reverse(orddict:fetch(File, Pending))) end, State, Files). -run_pending_action({read, Guid, From}, State) -> - read_message(Guid, From, State); -run_pending_action({contains, Guid, From}, State) -> - contains_message(Guid, From, State); -run_pending_action({remove, Guid, CRef}, State) -> - remove_message(Guid, CRef, State). +run_pending_action({read, MsgId, From}, State) -> + read_message(MsgId, From, State); +run_pending_action({contains, MsgId, From}, State) -> + contains_message(MsgId, From, State); +run_pending_action({remove, MsgId, CRef}, State) -> + remove_message(MsgId, CRef, State). safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) -> try @@ -1146,44 +1147,46 @@ orddict_store(Key, Val, Dict) -> false = orddict:is_key(Key, Dict), orddict:store(Key, Val, Dict). 
-update_pending_confirms(Fun, CRef, State = #msstate { clients = Clients, - cref_to_guids = CTG }) -> +update_pending_confirms(Fun, CRef, + State = #msstate { clients = Clients, + cref_to_msg_ids = CTM }) -> case dict:fetch(CRef, Clients) of {undefined, _CloseFDsFun} -> State; - {MsgOnDiskFun, _CloseFDsFun} -> CTG1 = Fun(MsgOnDiskFun, CTG), - State #msstate { cref_to_guids = CTG1 } + {MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM), + State #msstate { + cref_to_msg_ids = CTM1 } end. -record_pending_confirm(CRef, Guid, State) -> +record_pending_confirm(CRef, MsgId, State) -> update_pending_confirms( - fun (_MsgOnDiskFun, CTG) -> - dict:update(CRef, fun (Guids) -> gb_sets:add(Guid, Guids) end, - gb_sets:singleton(Guid), CTG) + fun (_MsgOnDiskFun, CTM) -> + dict:update(CRef, fun (MsgIds) -> gb_sets:add(MsgId, MsgIds) end, + gb_sets:singleton(MsgId), CTM) end, CRef, State). -client_confirm(CRef, Guids, ActionTaken, State) -> +client_confirm(CRef, MsgIds, ActionTaken, State) -> update_pending_confirms( - fun (MsgOnDiskFun, CTG) -> - MsgOnDiskFun(Guids, ActionTaken), - case dict:find(CRef, CTG) of - {ok, Gs} -> Guids1 = gb_sets:difference(Gs, Guids), - case gb_sets:is_empty(Guids1) of - true -> dict:erase(CRef, CTG); - false -> dict:store(CRef, Guids1, CTG) + fun (MsgOnDiskFun, CTM) -> + MsgOnDiskFun(MsgIds, ActionTaken), + case dict:find(CRef, CTM) of + {ok, Gs} -> MsgIds1 = gb_sets:difference(Gs, MsgIds), + case gb_sets:is_empty(MsgIds1) of + true -> dict:erase(CRef, CTM); + false -> dict:store(CRef, MsgIds1, CTM) end; - error -> CTG + error -> CTM end end, CRef, State). -%% Detect whether the Guid is older or younger than the client's death +%% Detect whether the MsgId is older or younger than the client's death %% msg (if there is one). If the msg is older than the client death %% msg, and it has a 0 ref_count we must only alter the ref_count, not %% rewrite the msg - rewriting it would make it younger than the death %% msg and thus should be ignored. Note that this (correctly) returns %% false when testing to remove the death msg itself. -should_mask_action(CRef, Guid, +should_mask_action(CRef, MsgId, State = #msstate { dying_clients = DyingClients }) -> - case {sets:is_element(CRef, DyingClients), index_lookup(Guid, State)} of + case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of {false, Location} -> {false, Location}; {true, not_found} -> @@ -1320,43 +1323,43 @@ list_sorted_file_names(Dir, Ext) -> %% message cache helper functions %%---------------------------------------------------------------------------- -maybe_insert_into_cache(DedupCacheEts, RefCount, Guid, Msg) +maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg) when RefCount > 1 -> - update_msg_cache(DedupCacheEts, Guid, Msg); -maybe_insert_into_cache(_DedupCacheEts, _RefCount, _Guid, _Msg) -> + update_msg_cache(DedupCacheEts, MsgId, Msg); +maybe_insert_into_cache(_DedupCacheEts, _RefCount, _MsgId, _Msg) -> ok. -update_msg_cache(CacheEts, Guid, Msg) -> - case ets:insert_new(CacheEts, {Guid, Msg, 1}) of +update_msg_cache(CacheEts, MsgId, Msg) -> + case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of true -> ok; false -> safe_ets_update_counter_ok( - CacheEts, Guid, {3, +1}, - fun () -> update_msg_cache(CacheEts, Guid, Msg) end) + CacheEts, MsgId, {3, +1}, + fun () -> update_msg_cache(CacheEts, MsgId, Msg) end) end. -remove_cache_entry(DedupCacheEts, Guid) -> - true = ets:delete(DedupCacheEts, Guid), +remove_cache_entry(DedupCacheEts, MsgId) -> + true = ets:delete(DedupCacheEts, MsgId), ok. 
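record_pending_confirm/3 and client_confirm/4 above keep one gb_sets set of pending msg ids per client reference, shrink it with gb_sets:difference/2 when the on-disk callback fires, and erase the dict entry once the set is empty. A minimal standalone sketch of that set handling; the ids and the function name are made up for illustration:

    %% Illustrative only: two pending ids, one of them confirmed.
    confirm_example() ->
        Pending0  = gb_sets:from_list([<<"msg-a">>, <<"msg-b">>]),
        Confirmed = gb_sets:singleton(<<"msg-a">>),
        Pending1  = gb_sets:difference(Pending0, Confirmed),
        case gb_sets:is_empty(Pending1) of
            true  -> all_confirmed;    %% would erase the client's entry
            false -> {still_pending, gb_sets:to_list(Pending1)}
        end.
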
-fetch_and_increment_cache(DedupCacheEts, Guid) -> - case ets:lookup(DedupCacheEts, Guid) of +fetch_and_increment_cache(DedupCacheEts, MsgId) -> + case ets:lookup(DedupCacheEts, MsgId) of [] -> not_found; - [{_Guid, Msg, _RefCount}] -> + [{_MsgId, Msg, _RefCount}] -> safe_ets_update_counter_ok( - DedupCacheEts, Guid, {3, +1}, + DedupCacheEts, MsgId, {3, +1}, %% someone has deleted us in the meantime, insert us - fun () -> ok = update_msg_cache(DedupCacheEts, Guid, Msg) end), + fun () -> ok = update_msg_cache(DedupCacheEts, MsgId, Msg) end), Msg end. -decrement_cache(DedupCacheEts, Guid) -> +decrement_cache(DedupCacheEts, MsgId) -> true = safe_ets_update_counter( - DedupCacheEts, Guid, {3, -1}, - fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, Guid); + DedupCacheEts, MsgId, {3, -1}, + fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, MsgId); (_N) -> true end, - %% Guid is not in there because although it's been + %% MsgId is not in there because although it's been %% delivered, it's never actually been read (think: %% persistent message held in RAM) fun () -> true end), @@ -1473,19 +1476,19 @@ count_msg_refs(Gen, Seed, State) -> case Gen(Seed) of finished -> ok; - {_Guid, 0, Next} -> + {_MsgId, 0, Next} -> count_msg_refs(Gen, Next, State); - {Guid, Delta, Next} -> - ok = case index_lookup(Guid, State) of + {MsgId, Delta, Next} -> + ok = case index_lookup(MsgId, State) of not_found -> - index_insert(#msg_location { guid = Guid, + index_insert(#msg_location { msg_id = MsgId, file = undefined, ref_count = Delta }, State); #msg_location { ref_count = RefCount } = StoreEntry -> NewRefCount = RefCount + Delta, case NewRefCount of - 0 -> index_delete(Guid, State); + 0 -> index_delete(MsgId, State); _ -> index_update(StoreEntry #msg_location { ref_count = NewRefCount }, State) @@ -1539,8 +1542,8 @@ scan_file_for_valid_messages(Dir, FileName) -> {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} end. -scan_fun({Guid, TotalSize, Offset, _Msg}, Acc) -> - [{Guid, TotalSize, Offset} | Acc]. +scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) -> + [{MsgId, TotalSize, Offset} | Acc]. %% Takes the list in *ascending* order (i.e. eldest message %% first). This is the opposite of what scan_file_for_valid_messages @@ -1619,8 +1622,8 @@ build_index_worker(Gatherer, State = #msstate { dir = Dir }, scan_file_for_valid_messages(Dir, filenum_to_name(File)), {ValidMessages, ValidTotalSize} = lists:foldl( - fun (Obj = {Guid, TotalSize, Offset}, {VMAcc, VTSAcc}) -> - case index_lookup(Guid, State) of + fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) -> + case index_lookup(MsgId, State) of #msg_location { file = undefined } = StoreEntry -> ok = index_update(StoreEntry #msg_location { file = File, offset = Offset, @@ -1638,7 +1641,7 @@ build_index_worker(Gatherer, State = #msstate { dir = Dir }, %% file size. 
[] -> {undefined, case ValidMessages of [] -> 0; - _ -> {_Guid, TotalSize, Offset} = + _ -> {_MsgId, TotalSize, Offset} = lists:last(ValidMessages), Offset + TotalSize end}; @@ -1903,8 +1906,8 @@ load_and_vacuum_message_file(File, #gc_state { dir = Dir, scan_file_for_valid_messages(Dir, filenum_to_name(File)), %% foldl will reverse so will end up with msgs in ascending offset order lists:foldl( - fun ({Guid, TotalSize, Offset}, Acc = {List, Size}) -> - case Index:lookup(Guid, IndexState) of + fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) -> + case Index:lookup(MsgId, IndexState) of #msg_location { file = File, total_size = TotalSize, offset = Offset, ref_count = 0 } = Entry -> ok = Index:delete_object(Entry, IndexState), @@ -1929,13 +1932,13 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, end, case lists:foldl( - fun (#msg_location { guid = Guid, offset = Offset, + fun (#msg_location { msg_id = MsgId, offset = Offset, total_size = TotalSize }, {CurOffset, Block = {BlockStart, BlockEnd}}) -> %% CurOffset is in the DestinationFile. %% Offset, BlockStart and BlockEnd are in the SourceFile %% update MsgLocation to reflect change of file and offset - ok = Index:update_fields(Guid, + ok = Index:update_fields(MsgId, [{#msg_location.file, Destination}, {#msg_location.offset, CurOffset}], IndexState), @@ -2002,9 +2005,9 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), - fun({Guid, _Size, _Offset, BinMsg}, ok) -> + fun({MsgId, _Size, _Offset, BinMsg}, ok) -> {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), file_handle_cache:close(RefOld), diff --git a/src/rabbit_msg_store_ets_index.erl b/src/rabbit_msg_store_ets_index.erl index 077400d6..d6dc5568 100644 --- a/src/rabbit_msg_store_ets_index.erl +++ b/src/rabbit_msg_store_ets_index.erl @@ -31,7 +31,7 @@ new(Dir) -> file:delete(filename:join(Dir, ?FILENAME)), - Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.guid}]), + Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]), #state { table = Tid, dir = Dir }. recover(Dir) -> -- cgit v1.2.1 From 87d9ba2a4387a56f228f6e2ffc54a354b8e6a67d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 17:55:05 +0000 Subject: guid -> msg_id in qi --- src/rabbit_queue_index.erl | 87 +++++++++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 7b5aa120..a4984114 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -86,7 +86,7 @@ %% and seeding the message store on start up. %% %% Note that in general, the representation of a message's state as -%% the tuple: {('no_pub'|{Guid, MsgProps, IsPersistent}), +%% the tuple: {('no_pub'|{MsgId, MsgProps, IsPersistent}), %% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly %% necessary for most operations. However, for startup, and to ensure %% the safe and correct combination of journal entries with entries @@ -138,10 +138,10 @@ -define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)). -define(NO_EXPIRY, 0). --define(GUID_BYTES, 16). %% md5sum is 128 bit or 16 bytes --define(GUID_BITS, (?GUID_BYTES * 8)). +-define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes +-define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). 
%% 16 bytes for md5sum + 8 for expiry + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, ?GUID_BYTES + ?EXPIRY_BYTES + 2). +-define(PUBLISH_RECORD_LENGTH_BYTES, ?MSG_ID_BYTES + ?EXPIRY_BYTES + 2). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * @@ -150,7 +150,7 @@ %% ---- misc ---- --define(PUB, {_, _, _}). %% {Guid, MsgProps, IsPersistent} +-define(PUB, {_, _, _}). %% {MsgId, MsgProps, IsPersistent} -define(READ_MODE, [binary, raw, read]). -define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]). @@ -159,7 +159,7 @@ %%---------------------------------------------------------------------------- -record(qistate, { dir, segments, journal_handle, dirty_count, - max_journal_entries, on_sync, unsynced_guids }). + max_journal_entries, on_sync, unsynced_msg_ids }). -record(segment, { num, path, journal_entries, unacked }). @@ -187,7 +187,7 @@ dirty_count :: integer(), max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), - unsynced_guids :: [rabbit_types:msg_id()] + unsynced_msg_ids :: [rabbit_types:msg_id()] }). -type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())). -type(walker(A) :: fun ((A) -> 'finished' | @@ -258,22 +258,22 @@ delete_and_terminate(State) -> ok = rabbit_misc:recursive_delete([Dir]), State1. -publish(Guid, SeqId, MsgProps, IsPersistent, - State = #qistate { unsynced_guids = UnsyncedGuids }) - when is_binary(Guid) -> - ?GUID_BYTES = size(Guid), +publish(MsgId, SeqId, MsgProps, IsPersistent, + State = #qistate { unsynced_msg_ids = UnsyncedMsgIds }) + when is_binary(MsgId) -> + ?MSG_ID_BYTES = size(MsgId), {JournalHdl, State1} = get_journal_handle( State #qistate { - unsynced_guids = [Guid | UnsyncedGuids] }), + unsynced_msg_ids = [MsgId | UnsyncedMsgIds] }), ok = file_handle_cache:append( JournalHdl, [<<(case IsPersistent of true -> ?PUB_PERSIST_JPREFIX; false -> ?PUB_TRANS_JPREFIX end):?JPREFIX_BITS, SeqId:?SEQ_BITS>>, - create_pub_record_body(Guid, MsgProps)]), + create_pub_record_body(MsgId, MsgProps)]), maybe_flush_journal( - add_to_journal(SeqId, {Guid, MsgProps, IsPersistent}, State1)). + add_to_journal(SeqId, {MsgId, MsgProps, IsPersistent}, State1)). deliver(SeqIds, State) -> deliver_or_ack(del, SeqIds, State). @@ -283,8 +283,8 @@ ack(SeqIds, State) -> %% This is only called when there are outstanding confirms and the %% queue is idle. -sync(State = #qistate { unsynced_guids = Guids }) -> - sync_if([] =/= Guids, State). +sync(State = #qistate { unsynced_msg_ids = MsgIds }) -> + sync_if([] =/= MsgIds, State). sync(SeqIds, State) -> %% The SeqIds here contains the SeqId of every publish and ack in @@ -387,7 +387,7 @@ blank_state(QueueName) -> dirty_count = 0, max_journal_entries = MaxJournal, on_sync = fun (_) -> ok end, - unsynced_guids = [] }. + unsynced_msg_ids = [] }. clean_file_name(Dir) -> filename:join(Dir, ?CLEAN_FILENAME). 
@@ -469,8 +469,9 @@ recover_segment(ContainsCheckFun, CleanShutdown, {SegEntries1, UnackedCountDelta} = segment_plus_journal(SegEntries, JEntries), array:sparse_foldl( - fun (RelSeq, {{Guid, _MsgProps, _IsPersistent}, Del, no_ack}, Segment1) -> - recover_message(ContainsCheckFun(Guid), CleanShutdown, + fun (RelSeq, {{MsgId, _MsgProps, _IsPersistent}, Del, no_ack}, + Segment1) -> + recover_message(ContainsCheckFun(MsgId), CleanShutdown, Del, RelSeq, Segment1) end, Segment #segment { unacked = UnackedCount + UnackedCountDelta }, @@ -514,17 +515,17 @@ queue_index_walker({next, Gatherer}) when is_pid(Gatherer) -> ok = gatherer:stop(Gatherer), ok = rabbit_misc:unlink_and_capture_exit(Gatherer), finished; - {value, {Guid, Count}} -> - {Guid, Count, {next, Gatherer}} + {value, {MsgId, Count}} -> + {MsgId, Count, {next, Gatherer}} end. queue_index_walker_reader(QueueName, Gatherer) -> State = #qistate { segments = Segments, dir = Dir } = recover_journal(blank_state(QueueName)), [ok = segment_entries_foldr( - fun (_RelSeq, {{Guid, _MsgProps, true}, _IsDelivered, no_ack}, + fun (_RelSeq, {{MsgId, _MsgProps, true}, _IsDelivered, no_ack}, ok) -> - gatherer:in(Gatherer, {Guid, 1}); + gatherer:in(Gatherer, {MsgId, 1}); (_RelSeq, _Value, Acc) -> Acc end, ok, segment_find_or_new(Seg, Dir, Segments)) || @@ -536,24 +537,24 @@ queue_index_walker_reader(QueueName, Gatherer) -> %% expiry/binary manipulation %%---------------------------------------------------------------------------- -create_pub_record_body(Guid, #message_properties{expiry = Expiry}) -> - [Guid, expiry_to_binary(Expiry)]. +create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> + [MsgId, expiry_to_binary(Expiry)]. expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. read_pub_record_body(Hdl) -> - case file_handle_cache:read(Hdl, ?GUID_BYTES + ?EXPIRY_BYTES) of + case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of {ok, Bin} -> %% work around for binary data fragmentation. See %% rabbit_msg_file:read_next/2 - <> = Bin, - <> = <>, + <> = Bin, + <> = <>, Exp = case Expiry of ?NO_EXPIRY -> undefined; X -> X end, - {Guid, #message_properties{expiry = Exp}}; + {MsgId, #message_properties{expiry = Exp}}; Error -> Error end. @@ -680,8 +681,8 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> case read_pub_record_body(Hdl) of - {Guid, MsgProps} -> - Publish = {Guid, MsgProps, + {MsgId, MsgProps} -> + Publish = {MsgId, MsgProps, case Prefix of ?PUB_PERSIST_JPREFIX -> true; ?PUB_TRANS_JPREFIX -> false @@ -715,9 +716,9 @@ sync_if(true, State = #qistate { journal_handle = JournalHdl }) -> ok = file_handle_cache:sync(JournalHdl), notify_sync(State). -notify_sync(State = #qistate { unsynced_guids = UG, on_sync = OnSyncFun }) -> +notify_sync(State = #qistate { unsynced_msg_ids = UG, on_sync = OnSyncFun }) -> OnSyncFun(gb_sets:from_list(UG)), - State #qistate { unsynced_guids = [] }. + State #qistate { unsynced_msg_ids = [] }. 
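create_pub_record_body/2 and read_pub_record_body/1 above fix the journal body as the 16-byte msg id followed by an 8-byte expiry, with ?NO_EXPIRY (0) standing for 'undefined'. A simplified, single-binary sketch of that layout, assuming only the byte sizes given by the defines; the real code builds an iolist and reads through file_handle_cache, and these function names are illustrative:

    %% Illustrative only: 16-byte id followed by a 64-bit expiry,
    %% where 0 means "no expiry".
    encode_pub_body(MsgId, undefined) when byte_size(MsgId) =:= 16 ->
        <<MsgId/binary, 0:64>>;
    encode_pub_body(MsgId, Expiry) when byte_size(MsgId) =:= 16 ->
        <<MsgId/binary, Expiry:64>>.

    decode_pub_body(<<MsgId:16/binary, Expiry:64>>) ->
        {MsgId, case Expiry of 0 -> undefined; X -> X end}.
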
%%---------------------------------------------------------------------------- %% segment manipulation @@ -795,12 +796,12 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> ok = case Pub of no_pub -> ok; - {Guid, MsgProps, IsPersistent} -> + {MsgId, MsgProps, IsPersistent} -> file_handle_cache:append( Hdl, [<>, - create_pub_record_body(Guid, MsgProps)]) + create_pub_record_body(MsgId, MsgProps)]) end, ok = case {Del, Ack} of {no_del, no_ack} -> @@ -820,10 +821,10 @@ read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq}, {Messages, Segments}, Dir) -> Segment = segment_find_or_new(Seg, Dir, Segments), {segment_entries_foldr( - fun (RelSeq, {{Guid, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc) + fun (RelSeq, {{MsgId, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc) when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso (Seg < EndSeg orelse EndRelSeq >= RelSeq) -> - [ {Guid, reconstruct_seq_id(StartSeg, RelSeq), MsgProps, + [ {MsgId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps, IsPersistent, IsDelivered == del} | Acc ]; (_RelSeq, _Value, Acc) -> Acc @@ -853,8 +854,8 @@ load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of {ok, <>} -> - {Guid, MsgProps} = read_pub_record_body(Hdl), - Obj = {{Guid, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, + {MsgId, MsgProps} = read_pub_record_body(Hdl), + Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), load_segment_entries(KeepAcked, Hdl, SegEntries1, UnackedCount + 1); @@ -1001,17 +1002,17 @@ add_queue_ttl_journal(<>) -> {<>, Rest}; add_queue_ttl_journal(<>) -> - {[<>, Guid, + MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) -> + {[<>, MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_journal(_) -> stop. add_queue_ttl_segment(<>) -> {[<>, Guid, expiry_to_binary(undefined)], Rest}; + RelSeq:?REL_SEQ_BITS>>, MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_segment(<>) -> {<>, -- cgit v1.2.1 From ab3668ec2104d35a57efdf828db521ecbb5a0dac Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:30:06 +0000 Subject: guid -> msg_id --- src/rabbit_backing_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 6a21e10f..03c1fdd1 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -62,7 +62,7 @@ behaviour_info(callbacks) -> {fetch, 2}, %% Acktags supplied are for messages which can now be forgotten - %% about. Must return 1 guid per Ack, in the same order as Acks. + %% about. Must return 1 msg_id per Ack, in the same order as Acks. {ack, 2}, %% A publish, but in the context of a transaction. -- cgit v1.2.1 From 5769f3263378c0d6fb48bee884e6f24cc65304b1 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:30:25 +0000 Subject: guid -> msg_id in vq except for #basic_message --- src/rabbit_variable_queue.erl | 220 +++++++++++++++++++++--------------------- 1 file changed, 110 insertions(+), 110 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..1d32cec6 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -156,7 +156,7 @@ %% segments. 
%% %% Pending acks are recorded in memory either as the tuple {SeqId, -%% Guid, MsgProps} (tuple-form) or as the message itself (message- +%% MsgId, MsgProps} (tuple-form) or as the message itself (message- %% form). Acks for persistent messages are always stored in the tuple- %% form. Acks for transient messages are also stored in tuple-form if %% the message has been sent to disk as part of the memory reduction @@ -261,7 +261,7 @@ -record(msg_status, { seq_id, - guid, + msg_id, msg, is_persistent, is_delivered, @@ -400,10 +400,10 @@ stop_msg_store() -> init(QueueName, IsDurable, Recover) -> Self = self(), init(QueueName, IsDurable, Recover, - fun (Guids, ActionTaken) -> - msgs_written_to_disk(Self, Guids, ActionTaken) + fun (MsgIds, ActionTaken) -> + msgs_written_to_disk(Self, MsgIds, ActionTaken) end, - fun (Guids) -> msg_indices_written_to_disk(Self, Guids) end). + fun (MsgIds) -> msg_indices_written_to_disk(Self, MsgIds) end). init(QueueName, IsDurable, false, MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), @@ -432,8 +432,8 @@ init(QueueName, true, true, MsgOnDiskFun, MsgIdxOnDiskFun) -> rabbit_queue_index:recover( QueueName, Terms1, rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE), - fun (Guid) -> - rabbit_msg_store:contains(Guid, PersistentClient) + fun (MsgId) -> + rabbit_msg_store:contains(MsgId, PersistentClient) end, MsgIdxOnDiskFun), init(true, IndexState, DeltaCount, Terms1, @@ -509,17 +509,17 @@ publish(Msg, MsgProps, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). -publish_delivered(false, #basic_message { guid = Guid }, +publish_delivered(false, #basic_message { guid = MsgId }, #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of - true -> blind_confirm(self(), gb_sets:singleton(Guid)); + true -> blind_confirm(self(), gb_sets:singleton(MsgId)); false -> ok end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, - guid = Guid }, + guid = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0, @@ -535,7 +535,7 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State), State2 = record_pending_ack(m(MsgStatus1), State1), PCount1 = PCount + one_if(IsPersistent1), - UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), {SeqId, a(reduce_memory_use( State2 #vqstate { next_seq_id = SeqId + 1, out_counter = OutCount + 1, @@ -586,12 +586,12 @@ internal_queue_out(Fun, State = #vqstate { q4 = Q4 }) -> end. 
read_msg(MsgStatus = #msg_status { msg = undefined, - guid = Guid, + msg_id = MsgId, is_persistent = IsPersistent }, State = #vqstate { ram_msg_count = RamMsgCount, msg_store_clients = MSCState}) -> {{ok, Msg = #basic_message {}}, MSCState1} = - msg_store_read(MSCState, IsPersistent, Guid), + msg_store_read(MSCState, IsPersistent, MsgId), {MsgStatus #msg_status { msg = Msg }, State #vqstate { ram_msg_count = RamMsgCount + 1, msg_store_clients = MSCState1 }}; @@ -600,7 +600,7 @@ read_msg(MsgStatus, State) -> internal_fetch(AckRequired, MsgStatus = #msg_status { seq_id = SeqId, - guid = Guid, + msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -619,7 +619,7 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { %% 2. Remove from msg_store and queue index, if necessary Rem = fun () -> - ok = msg_store_remove(MSCState, IsPersistent, [Guid]) + ok = msg_store_remove(MSCState, IsPersistent, [MsgId]) end, Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, IndexState2 = @@ -678,7 +678,8 @@ tx_rollback(Txn, State = #vqstate { durable = IsDurable, #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), ok = case IsDurable of - true -> msg_store_remove(MSCState, true, persistent_guids(Pubs)); + true -> msg_store_remove(MSCState, true, + persistent_msg_ids(Pubs)); false -> ok end, {lists:append(AckTags), a(State)}. @@ -689,13 +690,13 @@ tx_commit(Txn, Fun, MsgPropsFun, #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), erase_tx(Txn), AckTags1 = lists:append(AckTags), - PersistentGuids = persistent_guids(Pubs), - HasPersistentPubs = PersistentGuids =/= [], + PersistentMsgIds = persistent_msg_ids(Pubs), + HasPersistentPubs = PersistentMsgIds =/= [], {AckTags1, a(case IsDurable andalso HasPersistentPubs of true -> ok = msg_store_sync( - MSCState, true, PersistentGuids, - msg_store_callback(PersistentGuids, Pubs, AckTags1, + MSCState, true, PersistentMsgIds, + msg_store_callback(PersistentMsgIds, Pubs, AckTags1, Fun, MsgPropsFun)), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, @@ -713,10 +714,10 @@ requeue(AckTags, MsgPropsFun, State) -> {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), true, false, State1), State2; - ({IsPersistent, Guid, MsgProps}, State1) -> + ({IsPersistent, MsgId, MsgProps}, State1) -> #vqstate { msg_store_clients = MSCState } = State1, {{ok, Msg = #basic_message{}}, MSCState1} = - msg_store_read(MSCState, IsPersistent, Guid), + msg_store_read(MSCState, IsPersistent, MsgId), State2 = State1 #vqstate { msg_store_clients = MSCState1 }, {_SeqId, State3} = publish(Msg, MsgPropsFun1(MsgProps), true, true, State2), @@ -905,12 +906,12 @@ cons_if(true, E, L) -> [E | L]; cons_if(false, _E, L) -> L. gb_sets_maybe_insert(false, _Val, Set) -> Set; -%% when requeueing, we re-add a guid to the unconfirmed set +%% when requeueing, we re-add a msg_id to the unconfirmed set gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). -msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = Guid }, +msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = MsgId }, MsgProps) -> - #msg_status { seq_id = SeqId, guid = Guid, msg = Msg, + #msg_status { seq_id = SeqId, msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = false, msg_on_disk = false, index_on_disk = false, msg_props = MsgProps }. 
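gb_sets_maybe_insert/3 above only touches the unconfirmed set when the publish actually needs confirming; in the false case the set comes back unchanged. A short usage sketch, assuming the helper as shown and made-up ids:

    %% Illustrative only: the 'false' case is a no-op on the set.
    unconfirmed_example() ->
        UC0 = gb_sets:empty(),
        UC1 = gb_sets_maybe_insert(true,  <<"needs-confirm">>, UC0),
        UC1 = gb_sets_maybe_insert(false, <<"fire-and-forget">>, UC1),
        true  = gb_sets:is_element(<<"needs-confirm">>, UC1),
        false = gb_sets:is_element(<<"fire-and-forget">>, UC1),
        UC1.
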
@@ -937,30 +938,30 @@ msg_store_client_init(MsgStore, Ref, MsgOnDiskFun) -> MsgStore, Ref, MsgOnDiskFun, msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE)). -msg_store_write(MSCState, IsPersistent, Guid, Msg) -> +msg_store_write(MSCState, IsPersistent, MsgId, Msg) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:write(Guid, Msg, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:write(MsgId, Msg, MSCState1) end). -msg_store_read(MSCState, IsPersistent, Guid) -> +msg_store_read(MSCState, IsPersistent, MsgId) -> with_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:read(Guid, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:read(MsgId, MSCState1) end). -msg_store_remove(MSCState, IsPersistent, Guids) -> +msg_store_remove(MSCState, IsPersistent, MsgIds) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:remove(Guids, MCSState1) end). + fun (MCSState1) -> rabbit_msg_store:remove(MsgIds, MCSState1) end). -msg_store_release(MSCState, IsPersistent, Guids) -> +msg_store_release(MSCState, IsPersistent, MsgIds) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:release(Guids, MCSState1) end). + fun (MCSState1) -> rabbit_msg_store:release(MsgIds, MCSState1) end). -msg_store_sync(MSCState, IsPersistent, Guids, Callback) -> +msg_store_sync(MSCState, IsPersistent, MsgIds, Fun) -> with_immutable_msg_store_state( MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:sync(Guids, Callback, MSCState1) end). + fun (MSCState1) -> rabbit_msg_store:sync(MsgIds, Fun, MSCState1) end). msg_store_close_fds(MSCState, IsPersistent) -> with_msg_store_state( @@ -994,21 +995,21 @@ store_tx(Txn, Tx) -> put({txn, Txn}, Tx). erase_tx(Txn) -> erase({txn, Txn}). -persistent_guids(Pubs) -> - [Guid || {#basic_message { guid = Guid, - is_persistent = true }, _MsgProps} <- Pubs]. +persistent_msg_ids(Pubs) -> + [MsgId || {#basic_message { guid = MsgId, + is_persistent = true }, _MsgProps} <- Pubs]. betas_from_index_entries(List, TransientThreshold, IndexState) -> {Filtered, Delivers, Acks} = lists:foldr( - fun ({Guid, SeqId, MsgProps, IsPersistent, IsDelivered}, + fun ({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}, {Filtered1, Delivers1, Acks1}) -> case SeqId < TransientThreshold andalso not IsPersistent of true -> {Filtered1, cons_if(not IsDelivered, SeqId, Delivers1), [SeqId | Acks1]}; false -> {[m(#msg_status { msg = undefined, - guid = Guid, + msg_id = MsgId, seq_id = SeqId, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -1114,7 +1115,7 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. -msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> +msg_store_callback(PersistentMsgIds, Pubs, AckTags, Fun, MsgPropsFun) -> Self = self(), F = fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( Self, fun (StateN) -> {[], tx_commit_post_msg_store( @@ -1124,14 +1125,14 @@ msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun) -> end, fun () -> spawn(fun () -> ok = rabbit_misc:with_exit_handler( fun () -> remove_persistent_messages( - PersistentGuids) + PersistentMsgIds) end, F) end) end. 
-remove_persistent_messages(Guids) -> +remove_persistent_messages(MsgIds) -> PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, undefined), - ok = rabbit_msg_store:remove(Guids, PersistentClient), + ok = rabbit_msg_store:remove(MsgIds, PersistentClient), rabbit_msg_store:client_delete_and_terminate(PersistentClient). tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, @@ -1149,7 +1150,7 @@ tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, case dict:fetch(AckTag, PA) of #msg_status {} -> false; - {IsPersistent, _Guid, _MsgProps} -> + {IsPersistent, _MsgId, _MsgProps} -> IsPersistent end]; false -> [] @@ -1215,38 +1216,38 @@ purge_betas_and_deltas(LensByStore, end. remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) -> - {GuidsByStore, Delivers, Acks} = + {MsgIdsByStore, Delivers, Acks} = Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q), - ok = orddict:fold(fun (IsPersistent, Guids, ok) -> - msg_store_remove(MSCState, IsPersistent, Guids) - end, ok, GuidsByStore), - {sum_guids_by_store_to_len(LensByStore, GuidsByStore), + ok = orddict:fold(fun (IsPersistent, MsgIds, ok) -> + msg_store_remove(MSCState, IsPersistent, MsgIds) + end, ok, MsgIdsByStore), + {sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore), rabbit_queue_index:ack(Acks, rabbit_queue_index:deliver(Delivers, IndexState))}. remove_queue_entries1( - #msg_status { guid = Guid, seq_id = SeqId, + #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered, msg_on_disk = MsgOnDisk, index_on_disk = IndexOnDisk, is_persistent = IsPersistent }, - {GuidsByStore, Delivers, Acks}) -> + {MsgIdsByStore, Delivers, Acks}) -> {case MsgOnDisk of - true -> rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore); - false -> GuidsByStore + true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore); + false -> MsgIdsByStore end, cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers), cons_if(IndexOnDisk, SeqId, Acks)}. -sum_guids_by_store_to_len(LensByStore, GuidsByStore) -> +sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) -> orddict:fold( - fun (IsPersistent, Guids, LensByStore1) -> - orddict:update_counter(IsPersistent, length(Guids), LensByStore1) - end, LensByStore, GuidsByStore). + fun (IsPersistent, MsgIds, LensByStore1) -> + orddict:update_counter(IsPersistent, length(MsgIds), LensByStore1) + end, LensByStore, MsgIdsByStore). 
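remove_queue_entries1/2 above buckets on-disk msg ids into an orddict keyed by the IsPersistent flag, and sum_msg_ids_by_store_to_len/2 then folds those buckets into per-store counts. The same two-step bucketing as a standalone sketch with hypothetical ids; orddict:append/3 stands in here for rabbit_misc:orddict_cons/3:

    %% Illustrative only: bucket ids by persistence, then count per bucket.
    bucket_example() ->
        Entries = [{true, <<"p1">>}, {false, <<"t1">>}, {true, <<"p2">>}],
        ByStore = lists:foldl(
                    fun ({IsPersistent, MsgId}, Acc) ->
                            orddict:append(IsPersistent, MsgId, Acc)
                    end, orddict:new(), Entries),
        %% yields [{false, 1}, {true, 2}]
        orddict:fold(
          fun (IsPersistent, MsgIds, Lens) ->
                  orddict:update_counter(IsPersistent, length(MsgIds), Lens)
          end, orddict:new(), ByStore).
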
%%---------------------------------------------------------------------------- %% Internal gubbins for publishing %%---------------------------------------------------------------------------- -publish(Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, +publish(Msg = #basic_message { is_persistent = IsPersistent, guid = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, IsDelivered, MsgOnDisk, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, @@ -1266,7 +1267,7 @@ publish(Msg = #basic_message { is_persistent = IsPersistent, guid = Guid }, true -> State1 #vqstate { q4 = queue:in(m(MsgStatus1), Q4) } end, PCount1 = PCount + one_if(IsPersistent1), - UC1 = gb_sets_maybe_insert(NeedsConfirming, Guid, UC), + UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC), {SeqId, State2 #vqstate { next_seq_id = SeqId + 1, len = Len + 1, in_counter = InCount + 1, @@ -1278,14 +1279,14 @@ maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status { msg_on_disk = true }, _MSCState) -> MsgStatus; maybe_write_msg_to_disk(Force, MsgStatus = #msg_status { - msg = Msg, guid = Guid, + msg = Msg, msg_id = MsgId, is_persistent = IsPersistent }, MSCState) when Force orelse IsPersistent -> Msg1 = Msg #basic_message { %% don't persist any recoverable decoded properties content = rabbit_binary_parser:clear_decoded_content( Msg #basic_message.content)}, - ok = msg_store_write(MSCState, IsPersistent, Guid, Msg1), + ok = msg_store_write(MSCState, IsPersistent, MsgId, Msg1), MsgStatus #msg_status { msg_on_disk = true }; maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) -> MsgStatus. @@ -1295,7 +1296,7 @@ maybe_write_index_to_disk(_Force, MsgStatus = #msg_status { true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION {MsgStatus, IndexState}; maybe_write_index_to_disk(Force, MsgStatus = #msg_status { - guid = Guid, + msg_id = MsgId, seq_id = SeqId, is_persistent = IsPersistent, is_delivered = IsDelivered, @@ -1303,7 +1304,7 @@ maybe_write_index_to_disk(Force, MsgStatus = #msg_status { when Force orelse IsPersistent -> true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION IndexState1 = rabbit_queue_index:publish( - Guid, SeqId, MsgProps, IsPersistent, IndexState), + MsgId, SeqId, MsgProps, IsPersistent, IndexState), {MsgStatus #msg_status { index_on_disk = true }, maybe_write_delivered(IsDelivered, SeqId, IndexState1)}; maybe_write_index_to_disk(_Force, MsgStatus, IndexState) -> @@ -1322,7 +1323,7 @@ maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, %%---------------------------------------------------------------------------- record_pending_ack(#msg_status { seq_id = SeqId, - guid = Guid, + msg_id = MsgId, is_persistent = IsPersistent, msg_on_disk = MsgOnDisk, msg_props = MsgProps } = MsgStatus, @@ -1331,8 +1332,8 @@ record_pending_ack(#msg_status { seq_id = SeqId, ack_in_counter = AckInCount}) -> {AckEntry, RAI1} = case MsgOnDisk of - true -> {{IsPersistent, Guid, MsgProps}, RAI}; - false -> {MsgStatus, gb_trees:insert(SeqId, Guid, RAI)} + true -> {{IsPersistent, MsgId, MsgProps}, RAI}; + false -> {MsgStatus, gb_trees:insert(SeqId, MsgId, RAI)} end, PA1 = dict:store(SeqId, AckEntry, PA), State #vqstate { pending_ack = PA1, @@ -1343,28 +1344,28 @@ remove_pending_ack(KeepPersistent, State = #vqstate { pending_ack = PA, index_state = IndexState, msg_store_clients = MSCState }) -> - {PersistentSeqIds, GuidsByStore} = + {PersistentSeqIds, MsgIdsByStore} = dict:fold(fun accumulate_ack/3, accumulate_ack_init(), PA), State1 = State #vqstate { pending_ack = dict:new(), ram_ack_index = 
gb_trees:empty() }, case KeepPersistent of - true -> case orddict:find(false, GuidsByStore) of - error -> State1; - {ok, Guids} -> ok = msg_store_remove(MSCState, false, - Guids), + true -> case orddict:find(false, MsgIdsByStore) of + error -> State1; + {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, + MsgIds), State1 end; false -> IndexState1 = rabbit_queue_index:ack(PersistentSeqIds, IndexState), - [ok = msg_store_remove(MSCState, IsPersistent, Guids) - || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], + [ok = msg_store_remove(MSCState, IsPersistent, MsgIds) + || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], State1 #vqstate { index_state = IndexState1 } end. ack(_MsgStoreFun, _Fun, [], State) -> State; ack(MsgStoreFun, Fun, AckTags, State) -> - {{PersistentSeqIds, GuidsByStore}, + {{PersistentSeqIds, MsgIdsByStore}, State1 = #vqstate { index_state = IndexState, msg_store_clients = MSCState, persistent_count = PCount, @@ -1380,10 +1381,10 @@ ack(MsgStoreFun, Fun, AckTags, State) -> gb_trees:delete_any(SeqId, RAI)})} end, {accumulate_ack_init(), State}, AckTags), IndexState1 = rabbit_queue_index:ack(PersistentSeqIds, IndexState), - [ok = MsgStoreFun(MSCState, IsPersistent, Guids) - || {IsPersistent, Guids} <- orddict:to_list(GuidsByStore)], - PCount1 = PCount - find_persistent_count(sum_guids_by_store_to_len( - orddict:new(), GuidsByStore)), + [ok = MsgStoreFun(MSCState, IsPersistent, MsgIds) + || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], + PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len( + orddict:new(), MsgIdsByStore)), State1 #vqstate { index_state = IndexState1, persistent_count = PCount1, ack_out_counter = AckOutCount + length(AckTags) }. @@ -1393,12 +1394,12 @@ accumulate_ack_init() -> {[], orddict:new()}. accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS msg_on_disk = false, index_on_disk = false }, - {PersistentSeqIdsAcc, GuidsByStore}) -> - {PersistentSeqIdsAcc, GuidsByStore}; -accumulate_ack(SeqId, {IsPersistent, Guid, _MsgProps}, - {PersistentSeqIdsAcc, GuidsByStore}) -> + {PersistentSeqIdsAcc, MsgIdsByStore}) -> + {PersistentSeqIdsAcc, MsgIdsByStore}; +accumulate_ack(SeqId, {IsPersistent, MsgId, _MsgProps}, + {PersistentSeqIdsAcc, MsgIdsByStore}) -> {cons_if(IsPersistent, SeqId, PersistentSeqIdsAcc), - rabbit_misc:orddict_cons(IsPersistent, Guid, GuidsByStore)}. + rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore)}. find_persistent_count(LensByStore) -> case orddict:find(true, LensByStore) of @@ -1417,12 +1418,12 @@ confirm_commit_index(State = #vqstate { index_state = IndexState }) -> false -> State end. -remove_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, +remove_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - State #vqstate { msgs_on_disk = gb_sets:difference(MOD, GuidSet), - msg_indices_on_disk = gb_sets:difference(MIOD, GuidSet), - unconfirmed = gb_sets:difference(UC, GuidSet) }. + State #vqstate { msgs_on_disk = gb_sets:difference(MOD, MsgIdSet), + msg_indices_on_disk = gb_sets:difference(MIOD, MsgIdSet), + unconfirmed = gb_sets:difference(UC, MsgIdSet) }. needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, unconfirmed = UC }) -> @@ -1439,37 +1440,37 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, %% subtraction. not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). -msgs_confirmed(GuidSet, State) -> - {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. 
+msgs_confirmed(MsgIdSet, State) -> + {gb_sets:to_list(MsgIdSet), remove_confirms(MsgIdSet, State)}. -blind_confirm(QPid, GuidSet) -> +blind_confirm(QPid, MsgIdSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - QPid, fun (State) -> msgs_confirmed(GuidSet, State) end). + QPid, fun (State) -> msgs_confirmed(MsgIdSet, State) end). -msgs_written_to_disk(QPid, GuidSet, removed) -> - blind_confirm(QPid, GuidSet); -msgs_written_to_disk(QPid, GuidSet, written) -> +msgs_written_to_disk(QPid, MsgIdSet, removed) -> + blind_confirm(QPid, MsgIdSet); +msgs_written_to_disk(QPid, MsgIdSet, written) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( QPid, fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), + Written = gb_sets:intersection(UC, MsgIdSet), + msgs_confirmed(gb_sets:intersection(MsgIdSet, MIOD), State #vqstate { msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) + gb_sets:union(MOD, Written) }) end). -msg_indices_written_to_disk(QPid, GuidSet) -> +msg_indices_written_to_disk(QPid, MsgIdSet) -> rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( QPid, fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), + Written = gb_sets:intersection(UC, MsgIdSet), + msgs_confirmed(gb_sets:intersection(MsgIdSet, MOD), State #vqstate { msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) + gb_sets:union(MIOD, Written) }) end). %%---------------------------------------------------------------------------- @@ -1547,17 +1548,16 @@ limit_ram_acks(Quota, State = #vqstate { pending_ack = PA, true -> {Quota, State}; false -> - {SeqId, Guid, RAI1} = gb_trees:take_largest(RAI), + {SeqId, MsgId, RAI1} = gb_trees:take_largest(RAI), MsgStatus = #msg_status { - guid = Guid, %% ASSERTION + msg_id = MsgId, %% ASSERTION is_persistent = false, %% ASSERTION msg_props = MsgProps } = dict:fetch(SeqId, PA), {_, State1} = maybe_write_to_disk(true, false, MsgStatus, State), + PA1 = dict:store(SeqId, {false, MsgId, MsgProps}, PA), limit_ram_acks(Quota - 1, - State1 #vqstate { - pending_ack = - dict:store(SeqId, {false, Guid, MsgProps}, PA), - ram_ack_index = RAI1 }) + State1 #vqstate { pending_ack = PA1, + ram_ack_index = RAI1 }) end. @@ -1818,9 +1818,9 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> multiple_routing_keys() -> transform_storage( fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> + MsgId, Persistent}) -> {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; + MsgId, Persistent}}; (_) -> {error, corrupt_message} end), ok. -- cgit v1.2.1 From 8569560c351598e90c38b2a794b1d46b96347b76 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:36:49 +0000 Subject: #basic_message.guid -> id --- include/rabbit.hrl | 2 +- src/rabbit_amqqueue_process.erl | 2 +- src/rabbit_basic.erl | 12 ++++++------ src/rabbit_types.erl | 2 +- src/rabbit_variable_queue.erl | 10 +++++----- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 4d75b546..9f483c30 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -62,7 +62,7 @@ -record(listener, {node, protocol, host, ip_address, port}). 
--record(basic_message, {exchange_name, routing_keys = [], content, guid, +-record(basic_message, {exchange_name, routing_keys = [], content, id, is_persistent}). -record(ssl_socket, {tcp, ssl}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..57426e13 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -433,7 +433,7 @@ record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, - guid = Guid}}, + id = Guid}}, State = #q{guid_to_channel = GTC, q = #amqqueue{durable = true}}) -> diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 57aad808..43230f30 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -116,12 +116,12 @@ message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> try {ok, #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | - header_routes(Props#'P_basic'.headers)]}} + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + id = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} catch {error, _Reason} = Error -> Error end. diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 899291f2..90dfd38d 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -67,7 +67,7 @@ #basic_message{exchange_name :: rabbit_exchange:name(), routing_keys :: [rabbit_router:routing_key()], content :: content(), - guid :: msg_id(), + id :: msg_id(), is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1d32cec6..0c4c06e8 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -509,7 +509,7 @@ publish(Msg, MsgProps, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). -publish_delivered(false, #basic_message { guid = MsgId }, +publish_delivered(false, #basic_message { id = MsgId }, #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> @@ -519,7 +519,7 @@ publish_delivered(false, #basic_message { guid = MsgId }, end, {undefined, a(State)}; publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, - guid = MsgId }, + id = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0, @@ -909,7 +909,7 @@ gb_sets_maybe_insert(false, _Val, Set) -> Set; %% when requeueing, we re-add a msg_id to the unconfirmed set gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set). -msg_status(IsPersistent, SeqId, Msg = #basic_message { guid = MsgId }, +msg_status(IsPersistent, SeqId, Msg = #basic_message { id = MsgId }, MsgProps) -> #msg_status { seq_id = SeqId, msg_id = MsgId, msg = Msg, is_persistent = IsPersistent, is_delivered = false, @@ -996,7 +996,7 @@ store_tx(Txn, Tx) -> put({txn, Txn}, Tx). erase_tx(Txn) -> erase({txn, Txn}). persistent_msg_ids(Pubs) -> - [MsgId || {#basic_message { guid = MsgId, + [MsgId || {#basic_message { id = MsgId, is_persistent = true }, _MsgProps} <- Pubs]. 
betas_from_index_entries(List, TransientThreshold, IndexState) -> @@ -1247,7 +1247,7 @@ sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) -> %% Internal gubbins for publishing %%---------------------------------------------------------------------------- -publish(Msg = #basic_message { is_persistent = IsPersistent, guid = MsgId }, +publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, IsDelivered, MsgOnDisk, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4, -- cgit v1.2.1 From 1bd39c0325baec4014cb05654f2be02f8843fdc8 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:46:21 +0000 Subject: guid -> msg_id in amqqueue_process --- src/rabbit_amqqueue_process.erl | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 57426e13..650b6a68 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -46,7 +46,7 @@ rate_timer_ref, expiry_timer_ref, stats_timer, - guid_to_channel, + msg_id_to_channel, ttl, ttl_timer_ref }). @@ -112,7 +112,7 @@ init(Q) -> expiry_timer_ref = undefined, ttl = undefined, stats_timer = rabbit_event:init_stats_timer(), - guid_to_channel = dict:new()}, hibernate, + msg_id_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. terminate(shutdown, State = #q{backing_queue = BQ}) -> @@ -404,22 +404,22 @@ deliver_from_queue_deliver(AckRequired, false, State) -> fetch(AckRequired, State), {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. -confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> - {CMs, GTC1} = +confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> + {CMs, MTC1} = lists:foldl( - fun(Guid, {CMs, GTC0}) -> - case dict:find(Guid, GTC0) of + fun(MsgId, {CMs, MTC0}) -> + case dict:find(MsgId, MTC0) of {ok, {ChPid, MsgSeqNo}} -> {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, GTC0)}; + dict:erase(MsgId, MTC0)}; _ -> - {CMs, GTC0} + {CMs, MTC0} end - end, {gb_trees:empty(), GTC}, Guids), + end, {gb_trees:empty(), MTC}, MsgIds), gb_trees:map(fun(ChPid, MsgSeqNos) -> rabbit_channel:confirm(ChPid, MsgSeqNos) end, CMs), - State#q{guid_to_channel = GTC1}. + State#q{msg_id_to_channel = MTC1}. gb_trees_cons(Key, Value, Tree) -> case gb_trees:lookup(Key, Tree) of @@ -433,12 +433,12 @@ record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, - id = Guid}}, + id = MsgId}}, State = - #q{guid_to_channel = GTC, - q = #amqqueue{durable = true}}) -> + #q{msg_id_to_channel = MTC, + q = #amqqueue{durable = true}}) -> {confirm, - State#q{guid_to_channel = dict:store(Guid, {ChPid, MsgSeqNo}, GTC)}}; + State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> {no_confirm, State}. @@ -618,9 +618,9 @@ backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - {Guids, BQS1} = Fun(BQS), + {MsgIds, BQS1} = Fun(BQS), run_message_queue( - confirm_messages(Guids, State#q{backing_queue_state = BQS1})). + confirm_messages(MsgIds, State#q{backing_queue_state = BQS1})). 
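confirm_messages/2 above looks each confirmed msg id up in the msg_id-to-channel dict, groups the matching sequence numbers per channel pid in a gb_trees accumulator, and drops the consumed dict entries before notifying each channel. That grouping step in isolation, with the queue state threading stripped away; the names are illustrative:

    %% Illustrative only: collect {ChPid, MsgSeqNo} hits per channel and
    %% erase each id that was found.
    group_confirms(MsgIds, MsgIdToChannel) ->
        lists:foldl(
          fun (MsgId, {Grouped, MTC}) ->
                  case dict:find(MsgId, MTC) of
                      {ok, {ChPid, MsgSeqNo}} ->
                          SeqNos = case gb_trees:lookup(ChPid, Grouped) of
                                       {value, Existing} -> [MsgSeqNo | Existing];
                                       none              -> [MsgSeqNo]
                                   end,
                          {gb_trees:enter(ChPid, SeqNos, Grouped),
                           dict:erase(MsgId, MTC)};
                      error ->
                          {Grouped, MTC}
                  end
          end, {gb_trees:empty(), MsgIdToChannel}, MsgIds).
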
commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, State = #q{backing_queue = BQ, @@ -767,8 +767,8 @@ prioritise_cast(Msg, _State) -> maybe_expire -> 8; drop_expired -> 8; emit_stats -> 7; - {ack, _Txn, _MsgIds, _ChPid} -> 7; - {reject, _MsgIds, _Requeue, _ChPid} -> 7; + {ack, _Txn, _AckTags, _ChPid} -> 7; + {reject, _AckTags, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; {unblock, _ChPid} -> 7; {maybe_run_queue_via_backing_queue, _Fun} -> 6; -- cgit v1.2.1 From 1076e2220865be678888d3ec1fd2799bdb55da60 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 18:58:26 +0000 Subject: guid -> msg_id in tests --- src/rabbit_tests.erl | 200 +++++++++++++++++++++++++-------------------------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..2def7573 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1602,50 +1602,50 @@ restart_msg_store_empty() -> ok = rabbit_variable_queue:start_msg_store( undefined, {fun (ok) -> finished end, ok}). -guid_bin(X) -> +msg_id_bin(X) -> erlang:md5(term_to_binary(X)). msg_store_client_init(MsgStore, Ref) -> rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined). -msg_store_contains(Atom, Guids, MSCState) -> +msg_store_contains(Atom, MsgIds, MSCState) -> Atom = lists:foldl( - fun (Guid, Atom1) when Atom1 =:= Atom -> - rabbit_msg_store:contains(Guid, MSCState) end, - Atom, Guids). + fun (MsgId, Atom1) when Atom1 =:= Atom -> + rabbit_msg_store:contains(MsgId, MSCState) end, + Atom, MsgIds). -msg_store_sync(Guids, MSCState) -> +msg_store_sync(MsgIds, MSCState) -> Ref = make_ref(), Self = self(), - ok = rabbit_msg_store:sync(Guids, fun () -> Self ! {sync, Ref} end, + ok = rabbit_msg_store:sync(MsgIds, fun () -> Self ! {sync, Ref} end, MSCState), receive {sync, Ref} -> ok after 10000 -> - io:format("Sync from msg_store missing for guids ~p~n", [Guids]), + io:format("Sync from msg_store missing for msg_ids ~p~n", [MsgIds]), throw(timeout) end. -msg_store_read(Guids, MSCState) -> - lists:foldl(fun (Guid, MSCStateM) -> - {{ok, Guid}, MSCStateN} = rabbit_msg_store:read( - Guid, MSCStateM), +msg_store_read(MsgIds, MSCState) -> + lists:foldl(fun (MsgId, MSCStateM) -> + {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read( + MsgId, MSCStateM), MSCStateN - end, MSCState, Guids). + end, MSCState, MsgIds). -msg_store_write(Guids, MSCState) -> - ok = lists:foldl( - fun (Guid, ok) -> rabbit_msg_store:write(Guid, Guid, MSCState) end, - ok, Guids). +msg_store_write(MsgIds, MSCState) -> + ok = lists:foldl(fun (MsgId, ok) -> + rabbit_msg_store:write(MsgId, MsgId, MSCState) + end, ok, MsgIds). -msg_store_remove(Guids, MSCState) -> - rabbit_msg_store:remove(Guids, MSCState). +msg_store_remove(MsgIds, MSCState) -> + rabbit_msg_store:remove(MsgIds, MSCState). -msg_store_remove(MsgStore, Ref, Guids) -> +msg_store_remove(MsgStore, Ref, MsgIds) -> with_msg_store_client(MsgStore, Ref, fun (MSCStateM) -> - ok = msg_store_remove(Guids, MSCStateM), + ok = msg_store_remove(MsgIds, MSCStateM), MSCStateM end). @@ -1655,140 +1655,140 @@ with_msg_store_client(MsgStore, Ref, Fun) -> foreach_with_msg_store_client(MsgStore, Ref, Fun, L) -> rabbit_msg_store:client_terminate( - lists:foldl(fun (Guid, MSCState) -> Fun(Guid, MSCState) end, + lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end, msg_store_client_init(MsgStore, Ref), L)). 
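msg_store_sync/2 in the test helpers above hands the store a callback that messages the test process with a fresh reference, then blocks in a receive with a 10 second timeout. That request/receive/timeout shape as a standalone sketch; the trigger is abstracted into a fun and the name is illustrative:

    %% Illustrative only: wait for an async completion callback, or time out.
    await_sync(TriggerFun, TimeoutMs) ->
        Ref  = make_ref(),
        Self = self(),
        TriggerFun(fun () -> Self ! {sync, Ref} end),
        receive
            {sync, Ref} -> ok
        after TimeoutMs ->
                throw(timeout)
        end.
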
test_msg_store() -> restart_msg_store_empty(), Self = self(), - Guids = [guid_bin(M) || M <- lists:seq(1,100)], - {Guids1stHalf, Guids2ndHalf} = lists:split(50, Guids), + MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)], + {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(50, MsgIds), Ref = rabbit_guid:guid(), MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we don't contain any of the msgs we're about to publish - false = msg_store_contains(false, Guids, MSCState), + false = msg_store_contains(false, MsgIds, MSCState), %% publish the first half - ok = msg_store_write(Guids1stHalf, MSCState), + ok = msg_store_write(MsgIds1stHalf, MSCState), %% sync on the first half - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% publish the second half - ok = msg_store_write(Guids2ndHalf, MSCState), + ok = msg_store_write(MsgIds2ndHalf, MSCState), %% sync on the first half again - the msg_store will be dirty, but %% we won't need the fsync - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% check they're all in there - true = msg_store_contains(true, Guids, MSCState), + true = msg_store_contains(true, MsgIds, MSCState), %% publish the latter half twice so we hit the caching and ref count code - ok = msg_store_write(Guids2ndHalf, MSCState), + ok = msg_store_write(MsgIds2ndHalf, MSCState), %% check they're still all in there - true = msg_store_contains(true, Guids, MSCState), + true = msg_store_contains(true, MsgIds, MSCState), %% sync on the 2nd half, but do lots of individual syncs to try %% and cause coalescing to happen ok = lists:foldl( - fun (Guid, ok) -> rabbit_msg_store:sync( - [Guid], fun () -> Self ! {sync, Guid} end, - MSCState) - end, ok, Guids2ndHalf), + fun (MsgId, ok) -> rabbit_msg_store:sync( + [MsgId], fun () -> Self ! 
{sync, MsgId} end, + MSCState) + end, ok, MsgIds2ndHalf), lists:foldl( - fun(Guid, ok) -> + fun(MsgId, ok) -> receive - {sync, Guid} -> ok + {sync, MsgId} -> ok after 10000 -> - io:format("Sync from msg_store missing (guid: ~p)~n", - [Guid]), + io:format("Sync from msg_store missing (msg_id: ~p)~n", + [MsgId]), throw(timeout) end - end, ok, Guids2ndHalf), + end, ok, MsgIds2ndHalf), %% it's very likely we're not dirty here, so the 1st half sync %% should hit a different code path - ok = msg_store_sync(Guids1stHalf, MSCState), + ok = msg_store_sync(MsgIds1stHalf, MSCState), %% read them all - MSCState1 = msg_store_read(Guids, MSCState), + MSCState1 = msg_store_read(MsgIds, MSCState), %% read them all again - this will hit the cache, not disk - MSCState2 = msg_store_read(Guids, MSCState1), + MSCState2 = msg_store_read(MsgIds, MSCState1), %% remove them all - ok = rabbit_msg_store:remove(Guids, MSCState2), + ok = rabbit_msg_store:remove(MsgIds, MSCState2), %% check first half doesn't exist - false = msg_store_contains(false, Guids1stHalf, MSCState2), + false = msg_store_contains(false, MsgIds1stHalf, MSCState2), %% check second half does exist - true = msg_store_contains(true, Guids2ndHalf, MSCState2), + true = msg_store_contains(true, MsgIds2ndHalf, MSCState2), %% read the second half again - MSCState3 = msg_store_read(Guids2ndHalf, MSCState2), + MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2), %% release the second half, just for fun (aka code coverage) - ok = rabbit_msg_store:release(Guids2ndHalf, MSCState3), + ok = rabbit_msg_store:release(MsgIds2ndHalf, MSCState3), %% read the second half again, just for fun (aka code coverage) - MSCState4 = msg_store_read(Guids2ndHalf, MSCState3), + MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3), ok = rabbit_msg_store:client_terminate(MSCState4), %% stop and restart, preserving every other msg in 2nd half ok = rabbit_variable_queue:stop_msg_store(), ok = rabbit_variable_queue:start_msg_store( [], {fun ([]) -> finished; - ([Guid|GuidsTail]) - when length(GuidsTail) rem 2 == 0 -> - {Guid, 1, GuidsTail}; - ([Guid|GuidsTail]) -> - {Guid, 0, GuidsTail} - end, Guids2ndHalf}), + ([MsgId|MsgIdsTail]) + when length(MsgIdsTail) rem 2 == 0 -> + {MsgId, 1, MsgIdsTail}; + ([MsgId|MsgIdsTail]) -> + {MsgId, 0, MsgIdsTail} + end, MsgIds2ndHalf}), MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we have the right msgs left lists:foldl( - fun (Guid, Bool) -> - not(Bool = rabbit_msg_store:contains(Guid, MSCState5)) - end, false, Guids2ndHalf), + fun (MsgId, Bool) -> + not(Bool = rabbit_msg_store:contains(MsgId, MSCState5)) + end, false, MsgIds2ndHalf), ok = rabbit_msg_store:client_terminate(MSCState5), %% restart empty restart_msg_store_empty(), MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), %% check we don't contain any of the msgs - false = msg_store_contains(false, Guids, MSCState6), + false = msg_store_contains(false, MsgIds, MSCState6), %% publish the first half again - ok = msg_store_write(Guids1stHalf, MSCState6), + ok = msg_store_write(MsgIds1stHalf, MSCState6), %% this should force some sort of sync internally otherwise misread ok = rabbit_msg_store:client_terminate( - msg_store_read(Guids1stHalf, MSCState6)), + msg_store_read(MsgIds1stHalf, MSCState6)), MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref), - ok = rabbit_msg_store:remove(Guids1stHalf, MSCState7), + ok = rabbit_msg_store:remove(MsgIds1stHalf, MSCState7), ok = rabbit_msg_store:client_terminate(MSCState7), %% restart empty - 
restart_msg_store_empty(), %% now safe to reuse guids + restart_msg_store_empty(), %% now safe to reuse msg_ids %% push a lot of msgs in... at least 100 files worth {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit), PayloadSizeBits = 65536, BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)), - GuidsBig = [guid_bin(X) || X <- lists:seq(1, BigCount)], + MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)], Payload = << 0:PayloadSizeBits >>, ok = with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, fun (MSCStateM) -> - [ok = rabbit_msg_store:write(Guid, Payload, MSCStateM) || - Guid <- GuidsBig], + [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) || + MsgId <- MsgIdsBig], MSCStateM end), %% now read them to ensure we hit the fast client-side reading ok = foreach_with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, - fun (Guid, MSCStateM) -> + fun (MsgId, MSCStateM) -> {{ok, Payload}, MSCStateN} = rabbit_msg_store:read( - Guid, MSCStateM), + MsgId, MSCStateM), MSCStateN - end, GuidsBig), + end, MsgIdsBig), %% .., then 3s by 1... ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]), %% .., then remove 3s by 2, from the young end first. This hits %% GC (under 50% good data left, but no empty files. Must GC). ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]), %% .., then remove 3s by 3, from the young end first. This hits %% GC... ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref, - [guid_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), + [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]), %% ensure empty ok = with_msg_store_client( ?PERSISTENT_MSG_STORE, Ref, fun (MSCStateM) -> - false = msg_store_contains(false, GuidsBig, MSCStateM), + false = msg_store_contains(false, MsgIdsBig, MSCStateM), MSCStateM end), %% restart empty @@ -1808,8 +1808,8 @@ init_test_queue() -> PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef), Res = rabbit_queue_index:recover( TestQueue, Terms, false, - fun (Guid) -> - rabbit_msg_store:contains(Guid, PersistentClient) + fun (MsgId) -> + rabbit_msg_store:contains(MsgId, PersistentClient) end, fun nop/1), ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient), @@ -1840,25 +1840,25 @@ queue_index_publish(SeqIds, Persistent, Qi) -> false -> ?TRANSIENT_MSG_STORE end, MSCState = msg_store_client_init(MsgStore, Ref), - {A, B = [{_SeqId, LastGuidWritten} | _]} = + {A, B = [{_SeqId, LastMsgIdWritten} | _]} = lists:foldl( - fun (SeqId, {QiN, SeqIdsGuidsAcc}) -> - Guid = rabbit_guid:guid(), + fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) -> + MsgId = rabbit_guid:guid(), QiM = rabbit_queue_index:publish( - Guid, SeqId, #message_properties{}, Persistent, QiN), - ok = rabbit_msg_store:write(Guid, Guid, MSCState), - {QiM, [{SeqId, Guid} | SeqIdsGuidsAcc]} + MsgId, SeqId, #message_properties{}, Persistent, QiN), + ok = rabbit_msg_store:write(MsgId, MsgId, MSCState), + {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]} end, {Qi, []}, SeqIds), %% do this just to force all of the publishes through to the msg_store: - true = rabbit_msg_store:contains(LastGuidWritten, MSCState), + true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState), ok = rabbit_msg_store:client_delete_and_terminate(MSCState), {A, B}. 
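Aside (not part of the patch): the three msg_store_remove passes in src/rabbit_tests.erl above walk lists:seq(BigCount, 1, -3), lists:seq(BigCount - 1, 1, -3) and lists:seq(BigCount - 2, 1, -3), i.e. every third message from three different starting offsets. A throwaway module (name and function invented here) that checks the passes cover 1..BigCount exactly once each, for any BigCount >= 3:

-module(remove_pass_check).
-export([covers_all/1]).

%% true iff the three "remove 3s by N" passes together hit every
%% message id in 1..BigCount exactly once.
covers_all(BigCount) when BigCount >= 3 ->
    Passes = [lists:seq(BigCount,     1, -3),
              lists:seq(BigCount - 1, 1, -3),
              lists:seq(BigCount - 2, 1, -3)],
    lists:sort(lists:append(Passes)) =:= lists:seq(1, BigCount).

For example, remove_pass_check:covers_all(100) returns true, which is why the "ensure empty" check near the end of the test can expect the store to contain none of MsgIdsBig.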
verify_read_with_published(_Delivered, _Persistent, [], _) -> ok; verify_read_with_published(Delivered, Persistent, - [{Guid, SeqId, _Props, Persistent, Delivered}|Read], - [{SeqId, Guid}|Published]) -> + [{MsgId, SeqId, _Props, Persistent, Delivered}|Read], + [{SeqId, MsgId}|Published]) -> verify_read_with_published(Delivered, Persistent, Read, Published); verify_read_with_published(_Delivered, _Persistent, _Read, _Published) -> ko. @@ -1866,10 +1866,10 @@ verify_read_with_published(_Delivered, _Persistent, _Read, _Published) -> test_queue_index_props() -> with_empty_test_queue( fun(Qi0) -> - Guid = rabbit_guid:guid(), + MsgId = rabbit_guid:guid(), Props = #message_properties{expiry=12345}, - Qi1 = rabbit_queue_index:publish(Guid, 1, Props, true, Qi0), - {[{Guid, 1, Props, _, _}], Qi2} = + Qi1 = rabbit_queue_index:publish(MsgId, 1, Props, true, Qi0), + {[{MsgId, 1, Props, _, _}], Qi2} = rabbit_queue_index:read(1, 2, Qi1), Qi2 end), @@ -1891,19 +1891,19 @@ test_queue_index() -> with_empty_test_queue( fun (Qi0) -> {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0), - {Qi2, SeqIdsGuidsA} = queue_index_publish(SeqIdsA, false, Qi1), + {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1), {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2), {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3), ok = verify_read_with_published(false, false, ReadA, - lists:reverse(SeqIdsGuidsA)), + lists:reverse(SeqIdsMsgIdsA)), %% should get length back as 0, as all the msgs were transient {0, Qi6} = restart_test_queue(Qi4), {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6), - {Qi8, SeqIdsGuidsB} = queue_index_publish(SeqIdsB, true, Qi7), + {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7), {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8), {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9), ok = verify_read_with_published(false, true, ReadB, - lists:reverse(SeqIdsGuidsB)), + lists:reverse(SeqIdsMsgIdsB)), %% should get length back as MostOfASegment LenB = length(SeqIdsB), {LenB, Qi12} = restart_test_queue(Qi10), @@ -1911,7 +1911,7 @@ test_queue_index() -> Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13), {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14), ok = verify_read_with_published(true, true, ReadC, - lists:reverse(SeqIdsGuidsB)), + lists:reverse(SeqIdsMsgIdsB)), Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15), Qi17 = rabbit_queue_index:flush(Qi16), %% Everything will have gone now because #pubs == #acks @@ -1927,12 +1927,12 @@ test_queue_index() -> %% a) partial pub+del+ack, then move to new segment with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsC} = queue_index_publish(SeqIdsC, + {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2), Qi4 = rabbit_queue_index:flush(Qi3), - {Qi5, _SeqIdsGuidsC1} = queue_index_publish([SegmentSize], + {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize], false, Qi4), Qi5 end), @@ -1940,10 +1940,10 @@ test_queue_index() -> %% b) partial pub+del, then move to new segment, then ack all in old segment with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsC2} = queue_index_publish(SeqIdsC, + {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1), - {Qi3, _SeqIdsGuidsC3} = queue_index_publish([SegmentSize], + {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize], false, Qi2), Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3), rabbit_queue_index:flush(Qi4) @@ 
-1952,7 +1952,7 @@ test_queue_index() -> %% c) just fill up several segments of all pubs, then +dels, then +acks with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsD} = queue_index_publish(SeqIdsD, + {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD, false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), @@ -1986,12 +1986,12 @@ test_queue_index() -> %% exercise journal_minus_segment, not segment_plus_journal. with_empty_test_queue( fun (Qi0) -> - {Qi1, _SeqIdsGuidsE} = queue_index_publish([0,1,2,4,5,7], + {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7], true, Qi0), Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1), Qi3 = rabbit_queue_index:ack([0], Qi2), {5, Qi4} = restart_test_queue(Qi3), - {Qi5, _SeqIdsGuidsF} = queue_index_publish([3,6,8], true, Qi4), + {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4), Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5), Qi7 = rabbit_queue_index:ack([1,2,3], Qi6), {5, Qi8} = restart_test_queue(Qi7), -- cgit v1.2.1 From 21525c0ad768914786c92b8a65ccf7baa42b13a6 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 19:32:39 +0000 Subject: cosmetic --- src/file_handle_cache.erl | 6 +-- src/gm.erl | 24 ++++----- src/gm_tests.erl | 14 ++--- src/rabbit_amqqueue.erl | 16 +++--- src/rabbit_amqqueue_process.erl | 22 ++++---- src/rabbit_auth_backend_internal.erl | 4 +- src/rabbit_auth_mechanism_amqplain.erl | 2 +- src/rabbit_basic.erl | 34 ++++++------ src/rabbit_binding.erl | 2 +- src/rabbit_channel.erl | 6 +-- src/rabbit_channel_sup.erl | 12 ++--- src/rabbit_client_sup.erl | 4 +- src/rabbit_direct.erl | 22 ++++---- src/rabbit_event.erl | 2 +- src/rabbit_exchange.erl | 8 +-- src/rabbit_exchange_type_topic.erl | 34 ++++++------ src/rabbit_memory_monitor.erl | 10 ++-- src/rabbit_misc.erl | 2 +- src/rabbit_mnesia.erl | 8 +-- src/rabbit_msg_file.erl | 4 +- src/rabbit_msg_store.erl | 4 +- src/rabbit_networking.erl | 4 +- src/rabbit_node_monitor.erl | 2 +- src/rabbit_prelaunch.erl | 14 ++--- src/rabbit_queue_index.erl | 10 ++-- src/rabbit_ssl.erl | 6 +-- src/rabbit_tests.erl | 56 ++++++++++---------- src/rabbit_types.erl | 96 +++++++++++++++++----------------- src/rabbit_upgrade.erl | 6 +-- src/rabbit_variable_queue.erl | 4 +- src/rabbit_vhost.erl | 18 +++---- 31 files changed, 228 insertions(+), 228 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index f41815d0..855427dd 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -242,7 +242,7 @@ -> val_or_error(ref())). -spec(close/1 :: (ref()) -> ok_or_error()). -spec(read/2 :: (ref(), non_neg_integer()) -> - val_or_error([char()] | binary()) | 'eof'). + val_or_error([char()] | binary()) | 'eof'). -spec(append/2 :: (ref(), iodata()) -> ok_or_error()). -spec(sync/1 :: (ref()) -> ok_or_error()). -spec(position/2 :: (ref(), position()) -> val_or_error(offset())). @@ -252,7 +252,7 @@ -spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())). -spec(flush/1 :: (ref()) -> ok_or_error()). -spec(copy/3 :: (ref(), ref(), non_neg_integer()) -> - val_or_error(non_neg_integer())). + val_or_error(non_neg_integer())). -spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok'). -spec(delete/1 :: (ref()) -> ok_or_error()). -spec(clear/1 :: (ref()) -> ok_or_error()). 
@@ -1117,7 +1117,7 @@ reduce(State = #fhc_state { open_pending = OpenPending, case CStates of [] -> ok; _ -> case (Sum / ClientCount) - - (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of + (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of AverageAge when AverageAge > 0 -> notify_age(CStates, AverageAge); _ -> diff --git a/src/gm.erl b/src/gm.erl index 70633a08..fd8d9b77 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -852,9 +852,9 @@ alive_view_members({_Ver, View}) -> all_known_members({_Ver, View}) -> ?DICT:fold( - fun (Member, #view_member { aliases = Aliases }, Acc) -> - ?SETS:to_list(Aliases) ++ [Member | Acc] - end, [], View). + fun (Member, #view_member { aliases = Aliases }, Acc) -> + ?SETS:to_list(Aliases) ++ [Member | Acc] + end, [], View). group_to_view(#gm_group { members = Members, version = Ver }) -> Alive = lists:filter(fun is_member_alive/1, Members), @@ -1037,15 +1037,15 @@ maybe_erase_aliases(State = #state { self = Self, #view_member { aliases = Aliases } = fetch_view_member(Self, View), {Erasable, MembersState1} = ?SETS:fold( - fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> - #member { last_pub = LP, last_ack = LA } = - find_member_or_blank(Id, MembersState), - case can_erase_view_member(Self, Id, LA, LP) of - true -> {[Id | ErasableAcc], - erase_member(Id, MembersStateAcc)}; - false -> Acc - end - end, {[], MembersState}, Aliases), + fun (Id, {ErasableAcc, MembersStateAcc} = Acc) -> + #member { last_pub = LP, last_ack = LA } = + find_member_or_blank(Id, MembersState), + case can_erase_view_member(Self, Id, LA, LP) of + true -> {[Id | ErasableAcc], + erase_member(Id, MembersStateAcc)}; + false -> Acc + end + end, {[], MembersState}, Aliases), State1 = State #state { members_state = MembersState1 }, case Erasable of [] -> {ok, State1}; diff --git a/src/gm_tests.erl b/src/gm_tests.erl index 65e9cff0..ca0ffd64 100644 --- a/src/gm_tests.erl +++ b/src/gm_tests.erl @@ -117,13 +117,13 @@ test_broadcast(Fun) -> with_two_members(test_broadcast_fun(Fun)). test_broadcast_fun(Fun) -> - fun (Pid, Pid2) -> - ok = Fun(Pid, magic_message), - passed = receive_or_throw({msg, Pid, Pid, magic_message}, - timeout_waiting_for_msg), - passed = receive_or_throw({msg, Pid2, Pid, magic_message}, - timeout_waiting_for_msg) - end. + fun (Pid, Pid2) -> + ok = Fun(Pid, magic_message), + passed = receive_or_throw({msg, Pid, Pid, magic_message}, + timeout_waiting_for_msg), + passed = receive_or_throw({msg, Pid2, Pid, magic_message}, + timeout_waiting_for_msg) + end. with_two_members(Fun) -> ok = gm:create_tables(), diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 46b78c39..7a996a98 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -52,7 +52,7 @@ -type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}). -type(msg_id() :: non_neg_integer()). -type(ok_or_errors() :: - 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}). + 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}). -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). @@ -100,13 +100,13 @@ -spec(emit_stats/1 :: (rabbit_types:amqqueue()) -> 'ok'). -spec(delete_immediately/1 :: (rabbit_types:amqqueue()) -> 'ok'). 
-spec(delete/3 :: - (rabbit_types:amqqueue(), 'false', 'false') + (rabbit_types:amqqueue(), 'false', 'false') -> qlen(); - (rabbit_types:amqqueue(), 'true' , 'false') + (rabbit_types:amqqueue(), 'true' , 'false') -> qlen() | rabbit_types:error('in_use'); - (rabbit_types:amqqueue(), 'false', 'true' ) + (rabbit_types:amqqueue(), 'false', 'true' ) -> qlen() | rabbit_types:error('not_empty'); - (rabbit_types:amqqueue(), 'true' , 'true' ) + (rabbit_types:amqqueue(), 'true' , 'true' ) -> qlen() | rabbit_types:error('in_use') | rabbit_types:error('not_empty')). @@ -122,10 +122,10 @@ -spec(notify_down_all/2 :: ([pid()], pid()) -> ok_or_errors()). -spec(limit_all/3 :: ([pid()], pid(), pid() | 'undefined') -> ok_or_errors()). -spec(basic_get/3 :: (rabbit_types:amqqueue(), pid(), boolean()) -> - {'ok', non_neg_integer(), qmsg()} | 'empty'). + {'ok', non_neg_integer(), qmsg()} | 'empty'). -spec(basic_consume/7 :: - (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined', - rabbit_types:ctag(), boolean(), any()) + (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined', + rabbit_types:ctag(), boolean(), any()) -> rabbit_types:ok_or_error('exclusive_consume_unavailable')). -spec(basic_cancel/4 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok'). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 44053593..dde87b69 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -485,7 +485,7 @@ attempt_delivery(#delivery{txn = Txn, message = Message}, {NeedsConfirming, State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + backing_queue_state = BQS}}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), {true, NeedsConfirming, @@ -722,10 +722,10 @@ i(Item, _) -> consumers(#q{active_consumers = ActiveConsumers, blocked_consumers = BlockedConsumers}) -> rabbit_misc:queue_fold( - fun ({ChPid, #consumer{tag = ConsumerTag, - ack_required = AckRequired}}, Acc) -> - [{ChPid, ConsumerTag, AckRequired} | Acc] - end, [], queue:join(ActiveConsumers, BlockedConsumers)). + fun ({ChPid, #consumer{tag = ConsumerTag, + ack_required = AckRequired}}, Acc) -> + [{ChPid, ConsumerTag, AckRequired} | Acc] + end, [], queue:join(ActiveConsumers, BlockedConsumers)). emit_stats(State) -> emit_stats(State, []). @@ -906,15 +906,15 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, case is_ch_blocked(C) of true -> State1#q{ blocked_consumers = - add_consumer( - ChPid, Consumer, - State1#q.blocked_consumers)}; + add_consumer( + ChPid, Consumer, + State1#q.blocked_consumers)}; false -> run_message_queue( State1#q{ active_consumers = - add_consumer( - ChPid, Consumer, - State1#q.active_consumers)}) + add_consumer( + ChPid, Consumer, + State1#q.active_consumers)}) end, emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, not NoAck), diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index a564480b..3d005845 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -52,8 +52,8 @@ -spec(clear_admin/1 :: (rabbit_types:username()) -> 'ok'). -spec(list_users/0 :: () -> [{rabbit_types:username(), boolean()}]). -spec(lookup_user/1 :: (rabbit_types:username()) - -> rabbit_types:ok(rabbit_types:internal_user()) - | rabbit_types:error('not_found')). + -> rabbit_types:ok(rabbit_types:internal_user()) + | rabbit_types:error('not_found')). -spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(), regexp()) -> 'ok'). 
-spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) diff --git a/src/rabbit_auth_mechanism_amqplain.erl b/src/rabbit_auth_mechanism_amqplain.erl index 2168495d..b8682a46 100644 --- a/src/rabbit_auth_mechanism_amqplain.erl +++ b/src/rabbit_auth_mechanism_amqplain.erl @@ -54,5 +54,5 @@ handle_response(Response, _State) -> _ -> {protocol_error, "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field", - [LoginTable]} + [LoginTable]} end. diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 57aad808..8c930502 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -44,7 +44,7 @@ -spec(message/3 :: (rabbit_exchange:name(), rabbit_router:routing_key(), rabbit_types:decoded_content()) -> - rabbit_types:ok_or_error2(rabbit_types:message(), any())). + rabbit_types:ok_or_error2(rabbit_types:message(), any())). -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: @@ -107,21 +107,21 @@ strip_header(#content{properties = Props = #'P_basic'{headers = Headers}} false -> DecodedContent; {value, Found} -> Headers0 = lists:delete(Found, Headers), rabbit_binary_generator:clear_encoded_content( - DecodedContent#content{ - properties = Props#'P_basic'{ - headers = Headers0}}) + DecodedContent#content{ + properties = Props#'P_basic'{ + headers = Headers0}}) end. message(ExchangeName, RoutingKey, #content{properties = Props} = DecodedContent) -> try {ok, #basic_message{ - exchange_name = ExchangeName, - content = strip_header(DecodedContent, ?DELETED_HEADER), - guid = rabbit_guid:guid(), - is_persistent = is_message_persistent(DecodedContent), - routing_keys = [RoutingKey | - header_routes(Props#'P_basic'.headers)]}} + exchange_name = ExchangeName, + content = strip_header(DecodedContent, ?DELETED_HEADER), + guid = rabbit_guid:guid(), + is_persistent = is_message_persistent(DecodedContent), + routing_keys = [RoutingKey | + header_routes(Props#'P_basic'.headers)]}} catch {error, _Reason} = Error -> Error end. @@ -180,10 +180,10 @@ header_routes(undefined) -> []; header_routes(HeadersTable) -> lists:append( - [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of - {array, Routes} -> [Route || {longstr, Route} <- Routes]; - undefined -> []; - {Type, _Val} -> throw({error, {unacceptable_type_in_header, - Type, - binary_to_list(HeaderKey)}}) - end || HeaderKey <- ?ROUTING_HEADERS]). + [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of + {array, Routes} -> [Route || {longstr, Route} <- Routes]; + undefined -> []; + {Type, _Val} -> throw({error, {unacceptable_type_in_header, + Type, + binary_to_list(HeaderKey)}}) + end || HeaderKey <- ?ROUTING_HEADERS]). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 96a22dca..7ddb7814 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -70,7 +70,7 @@ rabbit_types:infos()). -spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). -spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). + -> [rabbit_types:infos()]). -spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()). -spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). -spec(remove_for_destination/1 :: diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e92421fc..5fccb542 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -68,9 +68,9 @@ -type(channel_number() :: non_neg_integer()). 
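Aside (not part of the patch): the header_routes/1 hunk in src/rabbit_basic.erl above pulls sender-supplied routing keys out of the message headers table. A minimal sketch of that lookup over the {Name, Type, Value} header-table shape used elsewhere in this series; the module name, the hard-coded <<"CC">> key and the use of lists:keyfind in place of rabbit_misc:table_lookup are all assumptions made for illustration (the real key list comes from ?ROUTING_HEADERS, whose value is not shown in this patch):

-module(cc_routes_sketch).
-export([routes/1]).

%% Collect the longstr members of an 'array'-typed header; header_routes/1
%% above additionally throws on unacceptable header types, which this
%% sketch simply ignores.
routes(HeadersTable) ->
    case lists:keyfind(<<"CC">>, 1, HeadersTable) of
        {_Name, array, Routes} -> [Route || {longstr, Route} <- Routes];
        _                      -> []
    end.

For example, routes([{<<"CC">>, array, [{longstr, <<"extra.queue">>}]}]) returns [<<"extra.queue">>].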
-spec(start_link/9 :: - (channel_number(), pid(), pid(), rabbit_types:protocol(), - rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), - pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> + (channel_number(), pid(), pid(), rabbit_types:protocol(), + rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), + pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). -spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok'). -spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(), diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 9cc407bc..8175ad80 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -68,12 +68,12 @@ start_link({direct, Channel, ClientChannelPid, Protocol, User, VHost, {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( - SupPid, - {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, Protocol, - User, VHost, Capabilities, Collector, - start_limiter_fun(SupPid)]}, - intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), + SupPid, + {channel, {rabbit_channel, start_link, + [Channel, ClientChannelPid, ClientChannelPid, Protocol, + User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, + intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. %%---------------------------------------------------------------------------- diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl index dbdc6cd4..15e92542 100644 --- a/src/rabbit_client_sup.erl +++ b/src/rabbit_client_sup.erl @@ -29,9 +29,9 @@ -ifdef(use_specs). -spec(start_link/1 :: (mfa()) -> - rabbit_types:ok_pid_or_error()). + rabbit_types:ok_pid_or_error()). -spec(start_link/2 :: ({'local', atom()}, mfa()) -> - rabbit_types:ok_pid_or_error()). + rabbit_types:ok_pid_or_error()). -endif. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 586563f6..a2693c69 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -26,8 +26,8 @@ -spec(boot/0 :: () -> 'ok'). -spec(connect/4 :: (binary(), binary(), binary(), rabbit_types:protocol()) -> - {'ok', {rabbit_types:user(), - rabbit_framing:amqp_table()}}). + {'ok', {rabbit_types:user(), + rabbit_framing:amqp_table()}}). -spec(start_channel/7 :: (rabbit_channel:channel_number(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), @@ -40,12 +40,12 @@ boot() -> {ok, _} = supervisor2:start_child( - rabbit_sup, - {rabbit_direct_client_sup, - {rabbit_client_sup, start_link, - [{local, rabbit_direct_client_sup}, - {rabbit_channel_sup, start_link, []}]}, - transient, infinity, supervisor, [rabbit_client_sup]}), + rabbit_sup, + {rabbit_direct_client_sup, + {rabbit_client_sup, start_link, + [{local, rabbit_direct_client_sup}, + {rabbit_channel_sup, start_link, []}]}, + transient, infinity, supervisor, [rabbit_client_sup]}), ok. %%---------------------------------------------------------------------------- @@ -73,7 +73,7 @@ start_channel(Number, ClientChannelPid, Protocol, User, VHost, Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( - rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}]), + rabbit_direct_client_sup, + [{direct, Number, ClientChannelPid, Protocol, User, VHost, + Capabilities, Collector}]), {ok, ChannelPid}. 
diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 40651d36..9ed532db 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -101,7 +101,7 @@ ensure_stats_timer(State = #state{level = none}, _Fun) -> State; ensure_stats_timer(State = #state{timer = undefined}, Fun) -> {ok, TRef} = timer:apply_after(?STATS_INTERVAL, - erlang, apply, [Fun, []]), + erlang, apply, [Fun, []]), State#state{timer = TRef}; ensure_stats_timer(State, _Fun) -> State. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 92259195..a463e570 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -62,7 +62,7 @@ -> rabbit_types:infos()). -spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). -spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) - -> [rabbit_types:infos()]). + -> [rabbit_types:infos()]). -spec(publish/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> {rabbit_router:routing_result(), [pid()]}). -spec(delete/2 :: @@ -266,9 +266,9 @@ process_route(#resource{kind = queue} = QName, call_with_exchange(XName, Fun, PrePostCommitFun) -> rabbit_misc:execute_mnesia_transaction( fun () -> case mnesia:read({rabbit_exchange, XName}) of - [] -> {error, not_found}; - [X] -> Fun(X) - end + [] -> {error, not_found}; + [X] -> Fun(X) + end end, PrePostCommitFun). delete(XName, IfUnused) -> diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2363d05e..f12661d4 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -42,8 +42,8 @@ description() -> route(#exchange{name = X}, #delivery{message = #basic_message{routing_keys = Routes}}) -> lists:append([begin - Words = split_topic_key(RKey), - mnesia:async_dirty(fun trie_match/2, [X, Words]) + Words = split_topic_key(RKey), + mnesia:async_dirty(fun trie_match/2, [X, Words]) end || RKey <- Routes]). validate(_X) -> ok. @@ -51,9 +51,9 @@ create(_Tx, _X) -> ok. recover(_Exchange, Bs) -> rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end). + fun () -> + lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) + end). delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), @@ -166,9 +166,9 @@ trie_child(X, Node, Word) -> trie_bindings(X, Node) -> MatchHead = #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = '$1'}}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + destination = '$1'}}, mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]). trie_add_edge(X, FromNode, ToNode, W) -> @@ -194,9 +194,9 @@ trie_remove_binding(X, Node, D) -> trie_binding_op(X, Node, D, Op) -> ok = Op(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - destination = D}}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + destination = D}}, write). trie_has_any_children(X, Node) -> @@ -209,10 +209,10 @@ trie_has_any_children(X, Node) -> trie_has_any_bindings(X, Node) -> has_any(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - _ = '_'}, - _ = '_'}). + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + _ = '_'}, + _ = '_'}). 
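Aside (not part of the patch): the re-indented trie code in src/rabbit_exchange_type_topic.erl above implements AMQP topic matching, where "*" matches exactly one word and "#" matches zero or more. The semantics are easier to see written directly over word lists, so here is a stand-alone sketch (module and function names invented; string:tokens/2 drops empty words, which is close enough for illustration), not the trie implementation itself:

-module(topic_match_sketch).
-export([matches/2]).

%% matches(BindingKey, RoutingKey) -> boolean()
matches(Pattern, Key) ->
    match_words(string:tokens(Pattern, "."), string:tokens(Key, ".")).

match_words([],         [])        -> true;
match_words(["#"],      _)         -> true;  %% a trailing # matches the rest
match_words(["#" | Ps], Words)     ->        %% # matches zero or more words
    match_words(Ps, Words) orelse
        (Words =/= [] andalso match_words(["#" | Ps], tl(Words)));
match_words(["*" | Ps], [_ | Ws])  -> match_words(Ps, Ws);
match_words([W | Ps],   [W | Ws])  -> match_words(Ps, Ws);
match_words(_,          _)         -> false.

For instance matches("a.*.c", "a.b.c") and matches("#.b", "a.b") are true while matches("a.#.b", "a.b.c") is false, in line with the expectations in test_topic_matching later in this series.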
trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, @@ -223,8 +223,8 @@ trie_remove_all_edges(X) -> trie_remove_all_bindings(X) -> remove_all(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, _ = '_'}, - _ = '_'}). + trie_binding = #trie_binding{exchange_name = X, _ = '_'}, + _ = '_'}). has_any(Table, MatchHead) -> Select = mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read), diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl index 2f8c940b..996b0a98 100644 --- a/src/rabbit_memory_monitor.erl +++ b/src/rabbit_memory_monitor.erl @@ -111,11 +111,11 @@ stop() -> init([]) -> MemoryLimit = trunc(?MEMORY_LIMIT_SCALING * - (try - vm_memory_monitor:get_memory_limit() - catch - exit:{noproc, _} -> ?MEMORY_SIZE_FOR_DISABLED_VMM - end)), + (try + vm_memory_monitor:get_memory_limit() + catch + exit:{noproc, _} -> ?MEMORY_SIZE_FOR_DISABLED_VMM + end)), {ok, TRef} = timer:apply_interval(?DEFAULT_UPDATE_INTERVAL, ?SERVER, update, []), diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index abc27c5f..5579dbab 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -105,7 +105,7 @@ ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')). -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) - -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). + -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). -spec(r/2 :: (rabbit_types:vhost(), K) -> rabbit_types:r3(rabbit_types:vhost(), K, '_') when is_subtype(K, atom())). diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index fc95b77b..99fa6ace 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -129,10 +129,10 @@ empty_ram_only_tables() -> Node = node(), lists:foreach( fun (TabName) -> - case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of - true -> {atomic, ok} = mnesia:clear_table(TabName); - false -> ok - end + case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of + true -> {atomic, ok} = mnesia:clear_table(TabName); + false -> ok + end end, table_names()), ok. diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 55e6ac47..4b97d74c 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -46,8 +46,8 @@ rabbit_types:ok_or_error2({rabbit_guid:guid(), msg()}, any())). -spec(scan/4 :: (io_device(), file_size(), - fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), - A) -> {'ok', A, position()}). + fun (({rabbit_guid:guid(), msg_size(), position(), binary()}, A) -> A), + A) -> {'ok', A, position()}). -endif. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 9e65e442..d1b8f707 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -75,7 +75,7 @@ successfully_recovered, %% boolean: did we recover state? file_size_limit, %% how big are our files allowed to get? cref_to_guids %% client ref to synced messages mapping - }). + }). -record(client_msstate, { server, @@ -89,7 +89,7 @@ file_summary_ets, dedup_cache_ets, cur_file_cache_ets - }). + }). -record(file_summary, {file, valid_total_size, left, right, file_size, locked, readers}). diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 36f61628..fd545a68 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -67,7 +67,7 @@ -spec(close_connection/2 :: (pid(), string()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). 
-spec(check_tcp_listener_address/2 :: (atom(), listener_config()) - -> [{inet:ip_address(), ip_port(), family(), atom()}]). + -> [{inet:ip_address(), ip_port(), family(), atom()}]). -endif. @@ -98,7 +98,7 @@ boot_ssl() -> verify_peer -> [{verify_fun, fun([]) -> true; ([_|_]) -> false end} - | SslOptsConfig] + | SslOptsConfig] end, [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners], ok diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl index 817abaa2..ebae48d4 100644 --- a/src/rabbit_node_monitor.erl +++ b/src/rabbit_node_monitor.erl @@ -76,7 +76,7 @@ handle_cast(_Msg, State) -> handle_info({nodedown, Node}, State) -> rabbit_log:info("node ~p down~n", [Node]), ok = handle_dead_rabbit(Node), - {noreply, State}; + {noreply, State}; handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, State) -> rabbit_log:info("node ~p lost 'rabbit'~n", [Node]), ok = handle_dead_rabbit(Node), diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index d9d92788..7bb8c0ea 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -250,13 +250,13 @@ duplicate_node_check(NodeStr) -> case net_adm:names(NodeHost) of {ok, NamePorts} -> case proplists:is_defined(NodeName, NamePorts) of - true -> io:format("node with name ~p " - "already running on ~p~n", - [NodeName, NodeHost]), - [io:format(Fmt ++ "~n", Args) || - {Fmt, Args} <- rabbit_control:diagnostics(Node)], - terminate(?ERROR_CODE); - false -> ok + true -> io:format("node with name ~p " + "already running on ~p~n", + [NodeName, NodeHost]), + [io:format(Fmt ++ "~n", Args) || + {Fmt, Args} <- rabbit_control:diagnostics(Node)], + terminate(?ERROR_CODE); + false -> ok end; {error, EpmdReason} -> terminate("unexpected epmd error: ~p~n", [EpmdReason]) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 76b1136f..bc329947 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -145,8 +145,8 @@ %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * - (?PUBLISH_RECORD_LENGTH_BYTES + - (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). + (?PUBLISH_RECORD_LENGTH_BYTES + + (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). %% ---- misc ---- @@ -177,7 +177,7 @@ path :: file:filename(), journal_entries :: array(), unacked :: non_neg_integer() - })). + })). -type(seq_id() :: integer()). -type(seg_dict() :: {dict(), [segment()]}). -type(on_sync_fun() :: fun ((gb_set()) -> ok)). @@ -188,10 +188,10 @@ max_journal_entries :: non_neg_integer(), on_sync :: on_sync_fun(), unsynced_guids :: [rabbit_guid:guid()] - }). + }). -type(startup_fun_state() :: {fun ((A) -> 'finished' | {rabbit_guid:guid(), non_neg_integer(), A}), - A}). + A}). -type(shutdown_terms() :: [any()]). -spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()). diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index e831ee51..1953b6b8 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -87,8 +87,8 @@ cert_info(F, Cert) -> find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} - <- lists:flatten(RDNs), - T == Type] of + <- lists:flatten(RDNs), + T == Type] of [{printableString, S}] -> S; [] -> not_found end. 
@@ -166,7 +166,7 @@ format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; true -> S end; format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, - Min1, Min2, S1, S2, $Z]}) -> + Min1, Min2, S1, S2, $Z]}) -> io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ", [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]); format_asn1_value(V) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c6250df..b72b3e49 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -693,23 +693,23 @@ test_topic_matching() -> exchange_op_callback(X, Fun, ExtraArgs) -> rabbit_misc:execute_mnesia_transaction( - fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), + fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), rabbit_exchange:callback(X, Fun, [false, X] ++ ExtraArgs). test_topic_expect_match(X, List) -> lists:foreach( - fun ({Key, Expected}) -> - BinKey = list_to_binary(Key), - Res = rabbit_exchange_type_topic:route( - X, #delivery{message = #basic_message{routing_keys = + fun ({Key, Expected}) -> + BinKey = list_to_binary(Key), + Res = rabbit_exchange_type_topic:route( + X, #delivery{message = #basic_message{routing_keys = [BinKey]}}), - ExpectedRes = lists:map( - fun (Q) -> #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)} - end, Expected), - true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) - end, List). + ExpectedRes = lists:map( + fun (Q) -> #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)} + end, Expected), + true = (lists:usort(ExpectedRes) =:= lists:usort(Res)) + end, List). test_app_management() -> %% starting, stopping, status @@ -818,7 +818,7 @@ test_log_management_during_startup() -> ok = delete_log_handlers([sasl_report_tty_h]), ok = case catch control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotation_tty_no_handlers_test}); + log_rotation_tty_no_handlers_test}); {error, {cannot_log_to_tty, _, _}} -> ok end, @@ -843,8 +843,8 @@ test_log_management_during_startup() -> ok = add_log_handlers([{error_logger_file_h, MainLog}]), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotation_no_write_permission_dir_test}); - {error, {cannot_log_to_file, _, _}} -> ok + log_rotation_no_write_permission_dir_test}); + {error, {cannot_log_to_file, _, _}} -> ok end, %% start application with logging to a subdirectory which @@ -854,9 +854,9 @@ test_log_management_during_startup() -> ok = add_log_handlers([{error_logger_file_h, MainLog}]), ok = case control_action(start_app, []) of ok -> exit({got_success_but_expected_failure, - log_rotatation_parent_dirs_test}); + log_rotatation_parent_dirs_test}); {error, {cannot_log_to_file, _, - {error, {cannot_create_parent_dirs, _, eacces}}}} -> ok + {error, {cannot_create_parent_dirs, _, eacces}}}} -> ok end, ok = set_permissions(TmpDir, 8#00700), ok = set_permissions(TmpLog, 8#00600), @@ -1143,7 +1143,7 @@ test_server_status() -> [_|_] = rabbit_binding:list_for_source( rabbit_misc:r(<<"/">>, exchange, <<"">>)), [_] = rabbit_binding:list_for_destination( - rabbit_misc:r(<<"/">>, queue, <<"foo">>)), + rabbit_misc:r(<<"/">>, queue, <<"foo">>)), [_] = rabbit_binding:list_for_source_and_destination( rabbit_misc:r(<<"/">>, exchange, <<"">>), rabbit_misc:r(<<"/">>, queue, <<"foo">>)), @@ -1305,9 +1305,9 @@ test_delegates_async(SecondaryNode) -> make_responder(FMsg) -> make_responder(FMsg, timeout). 
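Aside (not part of the patch): the utcTime clause of format_asn1_value/1 at the top of the src/rabbit_ssl.erl hunk above turns a 13-byte ASN.1 UTCTime (the trailing $Z is matched off) into an ISO 8601 style timestamp. A throwaway check using the same format string, with the module name and example value invented here:

-module(utctime_format_sketch).
-export([example/0]).

%% "110304193239Z" (less the $Z) renders as "2011-03-04T19:32:39Z".
example() ->
    "2011-03-04T19:32:39Z" =
        lists:flatten(io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ",
                                    "110304193239")).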
make_responder(FMsg, Throw) -> fun () -> - receive Msg -> FMsg(Msg) - after 1000 -> throw(Throw) - end + receive Msg -> FMsg(Msg) + after 1000 -> throw(Throw) + end end. spawn_responders(Node, Responder, Count) -> @@ -1318,10 +1318,10 @@ await_response(0) -> await_response(Count) -> receive response -> ok, - await_response(Count - 1) + await_response(Count - 1) after 1000 -> - io:format("Async reply not received~n"), - throw(timeout) + io:format("Async reply not received~n"), + throw(timeout) end. must_exit(Fun) -> @@ -1337,7 +1337,7 @@ test_delegates_sync(SecondaryNode) -> BadSender = fun (_Pid) -> exit(exception) end, Responder = make_responder(fun ({'$gen_call', From, invoked}) -> - gen_server:reply(From, response) + gen_server:reply(From, response) end), BadResponder = make_responder(fun ({'$gen_call', From, invoked}) -> @@ -1349,7 +1349,7 @@ test_delegates_sync(SecondaryNode) -> must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end), must_exit(fun () -> - delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), + delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end), LocalGoodPids = spawn_responders(node(), Responder, 2), RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2), @@ -1953,7 +1953,7 @@ test_queue_index() -> with_empty_test_queue( fun (Qi0) -> {Qi1, _SeqIdsGuidsD} = queue_index_publish(SeqIdsD, - false, Qi0), + false, Qi0), Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1), Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2), rabbit_queue_index:flush(Qi3) @@ -2195,7 +2195,7 @@ check_variable_queue_status(VQ0, Props) -> variable_queue_wait_for_shuffling_end(VQ) -> case rabbit_variable_queue:needs_idle_timeout(VQ) of true -> variable_queue_wait_for_shuffling_end( - rabbit_variable_queue:idle_timeout(VQ)); + rabbit_variable_queue:idle_timeout(VQ)); false -> VQ end. diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index ab2300c0..a11595e5 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -42,39 +42,39 @@ %% TODO: make this more precise by tying specific class_ids to %% specific properties -type(undecoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: 'none', - properties_bin :: binary(), - payload_fragments_rev :: [binary()]} | - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: rabbit_framing:amqp_property_record(), - properties_bin :: 'none', - payload_fragments_rev :: [binary()]}). + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: 'none', + properties_bin :: binary(), + payload_fragments_rev :: [binary()]} | + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: 'none', + payload_fragments_rev :: [binary()]}). -type(unencoded_content() :: undecoded_content()). -type(decoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: rabbit_framing:amqp_property_record(), - properties_bin :: maybe(binary()), - payload_fragments_rev :: [binary()]}). + #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: rabbit_framing:amqp_property_record(), + properties_bin :: maybe(binary()), + payload_fragments_rev :: [binary()]}). -type(encoded_content() :: - #content{class_id :: rabbit_framing:amqp_class_id(), - properties :: maybe(rabbit_framing:amqp_property_record()), - properties_bin :: binary(), - payload_fragments_rev :: [binary()]}). 
+ #content{class_id :: rabbit_framing:amqp_class_id(), + properties :: maybe(rabbit_framing:amqp_property_record()), + properties_bin :: binary(), + payload_fragments_rev :: [binary()]}). -type(content() :: undecoded_content() | decoded_content()). -type(basic_message() :: - #basic_message{exchange_name :: rabbit_exchange:name(), - routing_keys :: [rabbit_router:routing_key()], - content :: content(), - guid :: rabbit_guid:guid(), - is_persistent :: boolean()}). + #basic_message{exchange_name :: rabbit_exchange:name(), + routing_keys :: [rabbit_router:routing_key()], + content :: content(), + guid :: rabbit_guid:guid(), + is_persistent :: boolean()}). -type(message() :: basic_message()). -type(delivery() :: - #delivery{mandatory :: boolean(), - immediate :: boolean(), - txn :: maybe(txn()), - sender :: pid(), - message :: message()}). + #delivery{mandatory :: boolean(), + immediate :: boolean(), + txn :: maybe(txn()), + sender :: pid(), + message :: message()}). -type(message_properties() :: #message_properties{expiry :: pos_integer() | 'undefined', needs_confirming :: boolean()}). @@ -89,9 +89,9 @@ -type(infos() :: [info()]). -type(amqp_error() :: - #amqp_error{name :: rabbit_framing:amqp_exception(), - explanation :: string(), - method :: rabbit_framing:amqp_method_name()}). + #amqp_error{name :: rabbit_framing:amqp_exception(), + explanation :: string(), + method :: rabbit_framing:amqp_method_name()}). -type(r(Kind) :: r2(vhost(), Kind)). @@ -103,34 +103,34 @@ name :: Name}). -type(listener() :: - #listener{node :: node(), - protocol :: atom(), - host :: rabbit_networking:hostname(), - port :: rabbit_networking:ip_port()}). + #listener{node :: node(), + protocol :: atom(), + host :: rabbit_networking:hostname(), + port :: rabbit_networking:ip_port()}). -type(binding_source() :: rabbit_exchange:name()). -type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()). -type(binding() :: - #binding{source :: rabbit_exchange:name(), - destination :: binding_destination(), - key :: rabbit_binding:key(), - args :: rabbit_framing:amqp_table()}). + #binding{source :: rabbit_exchange:name(), + destination :: binding_destination(), + key :: rabbit_binding:key(), + args :: rabbit_framing:amqp_table()}). -type(amqqueue() :: - #amqqueue{name :: rabbit_amqqueue:name(), - durable :: boolean(), - auto_delete :: boolean(), - exclusive_owner :: rabbit_types:maybe(pid()), - arguments :: rabbit_framing:amqp_table(), - pid :: rabbit_types:maybe(pid())}). + #amqqueue{name :: rabbit_amqqueue:name(), + durable :: boolean(), + auto_delete :: boolean(), + exclusive_owner :: rabbit_types:maybe(pid()), + arguments :: rabbit_framing:amqp_table(), + pid :: rabbit_types:maybe(pid())}). -type(exchange() :: - #exchange{name :: rabbit_exchange:name(), - type :: rabbit_exchange:type(), - durable :: boolean(), - auto_delete :: boolean(), - arguments :: rabbit_framing:amqp_table()}). + #exchange{name :: rabbit_exchange:name(), + type :: rabbit_exchange:type(), + durable :: boolean(), + auto_delete :: boolean(), + arguments :: rabbit_framing:amqp_table()}). -type(connection() :: pid()). diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 89acc10c..ebda5d03 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -106,9 +106,9 @@ upgrades_to_apply(Heads, G) -> %% everything we've already applied. Subtract that from all %% vertices: that's what we have to apply. 
Unsorted = sets:to_list( - sets:subtract( - sets:from_list(digraph:vertices(G)), - sets:from_list(digraph_utils:reaching(Heads, G)))), + sets:subtract( + sets:from_list(digraph:vertices(G)), + sets:from_list(digraph_utils:reaching(Heads, G)))), %% Form a subgraph from that list and find a topological ordering %% so we can invoke them in order. [element(2, digraph:vertex(G, StepName)) || diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 58a28d32..6a461a77 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -268,13 +268,13 @@ msg_on_disk, index_on_disk, msg_props - }). + }). -record(delta, { start_seq_id, %% start_seq_id is inclusive count, end_seq_id %% end_seq_id is exclusive - }). + }). -record(tx, { pending_messages, pending_acks }). diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index efebef06..24c130ed 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -48,15 +48,15 @@ add(VHostPath) -> ok; (ok, false) -> [rabbit_exchange:declare( - rabbit_misc:r(VHostPath, exchange, Name), - Type, true, false, false, []) || - {Name,Type} <- - [{<<"">>, direct}, - {<<"amq.direct">>, direct}, - {<<"amq.topic">>, topic}, - {<<"amq.match">>, headers}, %% per 0-9-1 pdf - {<<"amq.headers">>, headers}, %% per 0-9-1 xml - {<<"amq.fanout">>, fanout}]], + rabbit_misc:r(VHostPath, exchange, Name), + Type, true, false, false, []) || + {Name,Type} <- + [{<<"">>, direct}, + {<<"amq.direct">>, direct}, + {<<"amq.topic">>, topic}, + {<<"amq.match">>, headers}, %% per 0-9-1 pdf + {<<"amq.headers">>, headers}, %% per 0-9-1 xml + {<<"amq.fanout">>, fanout}]], ok end), rabbit_log:info("Added vhost ~p~n", [VHostPath]), -- cgit v1.2.1 From e80b3162f252dcda613583ef2e6b271b0c5c4deb Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 19:49:17 +0000 Subject: cosmetic - comment indentation --- src/gen_server2.erl | 4 +- src/rabbit.erl | 2 +- src/rabbit_amqqueue_process.erl | 4 +- src/rabbit_basic.erl | 2 +- src/rabbit_binary_generator.erl | 13 +-- src/rabbit_mnesia.erl | 14 +-- src/rabbit_networking.erl | 4 +- src/rabbit_reader.erl | 4 +- src/rabbit_tests.erl | 234 ++++++++++++++++++++-------------------- src/rabbit_writer.erl | 10 +- 10 files changed, 146 insertions(+), 145 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 94296f97..43e0a8f5 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -453,8 +453,8 @@ unregister_name({global,Name}) -> _ = global:unregister_name(Name); unregister_name(Pid) when is_pid(Pid) -> Pid; -% Under R12 let's just ignore it, as we have a single term as Name. -% On R13 it will never get here, as we get tuple with 'local/global' atom. +%% Under R12 let's just ignore it, as we have a single term as Name. +%% On R13 it will never get here, as we get tuple with 'local/global' atom. unregister_name(_Name) -> ok. extend_backoff(undefined) -> diff --git a/src/rabbit.erl b/src/rabbit.erl index 6eb59c3e..c9a929ae 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -375,7 +375,7 @@ config_files() -> error -> [] end. 
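Aside (not part of the patch): the upgrades_to_apply/2 change in src/rabbit_upgrade.erl above relies on digraph_utils:reaching/2 to compute everything already applied. A tiny self-contained illustration of that call's semantics only (module name and example graph invented here, nothing to do with actual upgrade steps): with edges a -> b -> c, every vertex has a path to c, so subtracting reaching([c], G) from the vertex set leaves nothing to apply.

-module(reaching_sketch).
-export([example/0]).

example() ->
    G = digraph:new(),
    [digraph:add_vertex(G, V) || V <- [a, b, c]],
    digraph:add_edge(G, a, b),
    digraph:add_edge(G, b, c),
    %% reaching/2 includes the target vertices themselves (paths of length 0)
    [a, b, c] = lists:sort(digraph_utils:reaching([c], G)),
    true = digraph:delete(G),
    ok.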
-%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- print_banner() -> {ok, Product} = application:get_key(id), diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index dde87b69..7719dfe7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -33,7 +33,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2, prioritise_info/2]). -% Queue's state +%% Queue's state -record(q, {q, exclusive_consumer, has_had_consumers, @@ -747,7 +747,7 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> {channel, ChPid}, {queue, self()}]). -%--------------------------------------------------------------------------- +%%---------------------------------------------------------------------------- prioritise_call(Msg, _From, _State) -> case Msg of diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 8c930502..f9a8ee1d 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -175,7 +175,7 @@ is_message_persistent(#content{properties = #'P_basic'{ Other -> throw({error, {delivery_mode_unknown, Other}}) end. -% Extract CC routes from headers +%% Extract CC routes from headers header_routes(undefined) -> []; header_routes(HeadersTable) -> diff --git a/src/rabbit_binary_generator.erl b/src/rabbit_binary_generator.erl index dc81ace6..68511a32 100644 --- a/src/rabbit_binary_generator.erl +++ b/src/rabbit_binary_generator.erl @@ -18,12 +18,13 @@ -include("rabbit_framing.hrl"). -include("rabbit.hrl"). -% EMPTY_CONTENT_BODY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1 -% - 1 byte of frame type -% - 2 bytes of channel number -% - 4 bytes of frame payload length -% - 1 byte of payload trailer FRAME_END byte -% See definition of check_empty_content_body_frame_size/0, an assertion called at startup. +%% EMPTY_CONTENT_BODY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1 +%% - 1 byte of frame type +%% - 2 bytes of channel number +%% - 4 bytes of frame payload length +%% - 1 byte of payload trailer FRAME_END byte +%% See definition of check_empty_content_body_frame_size/0, +%% an assertion called at startup. -define(EMPTY_CONTENT_BODY_FRAME_SIZE, 8). 
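Aside (not part of the patch): the comment block just reformatted above spells out why an empty content-body frame is 8 bytes. A scratch check (not the module's own check_empty_content_body_frame_size/0; module name invented, and the frame-type and frame-end octets are left as parameters rather than hard-coding protocol constants) that builds such a frame by hand:

-module(empty_frame_sketch).
-export([size_is_eight/2]).

%% 1 byte frame type + 2 bytes channel + 4 bytes payload length
%% + 0 bytes payload + 1 byte frame-end octet = 8 bytes.
size_is_eight(FrameType, FrameEnd) ->
    Channel = 0,
    Frame = <<FrameType:8, Channel:16, 0:32, FrameEnd:8>>,
    8 = byte_size(Frame).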
-export([build_simple_method_frame/3, diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 99fa6ace..66436920 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -519,13 +519,13 @@ create_local_table_copies(Type) -> HasDiscOnlyCopies -> disc_only_copies; true -> ram_copies end; -%% unused code - commented out to keep dialyzer happy -%% Type =:= disc_only -> -%% if -%% HasDiscCopies or HasDiscOnlyCopies -> -%% disc_only_copies; -%% true -> ram_copies -%% end; +%%% unused code - commented out to keep dialyzer happy +%%% Type =:= disc_only -> +%%% if +%%% HasDiscCopies or HasDiscOnlyCopies -> +%%% disc_only_copies; +%%% true -> ram_copies +%%% end; Type =:= ram -> ram_copies end, diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index fd545a68..877d2cf7 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -90,8 +90,8 @@ boot_ssl() -> {ok, SslListeners} -> ok = rabbit_misc:start_applications([crypto, public_key, ssl]), {ok, SslOptsConfig} = application:get_env(ssl_options), - % unknown_ca errors are silently ignored prior to R14B unless we - % supply this verify_fun - remove when at least R14B is required + %% unknown_ca errors are silently ignored prior to R14B unless we + %% supply this verify_fun - remove when at least R14B is required SslOpts = case proplists:get_value(verify, SslOptsConfig, verify_none) of verify_none -> SslOptsConfig; diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index b172db56..f9a3d9c7 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -37,7 +37,7 @@ -define(SILENT_CLOSE_DELAY, 3). -define(FRAME_MAX, 131072). %% set to zero once QPid fix their negotiation -%--------------------------------------------------------------------------- +%%-------------------------------------------------------------------------- -record(v1, {parent, sock, connection, callback, recv_length, recv_ref, connection_state, queue_collector, heartbeater, stats_timer, @@ -62,7 +62,7 @@ State#v1.connection_state =:= blocking orelse State#v1.connection_state =:= blocked)). -%%---------------------------------------------------------------------------- +%%-------------------------------------------------------------------------- -ifdef(use_specs). 
diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b72b3e49..88b58166 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -425,35 +425,35 @@ test_content_properties() -> [{<<"one">>, signedint, 1}, {<<"two">>, signedint, 2}]}]}], << - % property-flags - 16#8000:16, + %% property-flags + 16#8000:16, - % property-list: + %% property-list: - % table - 117:32, % table length in bytes + %% table + 117:32, % table length in bytes - 11,"a signedint", % name - "I",12345678:32, % type and value + 11,"a signedint", % name + "I",12345678:32, % type and value - 9,"a longstr", - "S",10:32,"yes please", + 9,"a longstr", + "S",10:32,"yes please", - 9,"a decimal", - "D",123,12345678:32, + 9,"a decimal", + "D",123,12345678:32, - 11,"a timestamp", - "T", 123456789012345:64, + 11,"a timestamp", + "T", 123456789012345:64, - 14,"a nested table", - "F", - 18:32, + 14,"a nested table", + "F", + 18:32, - 3,"one", - "I",1:32, + 3,"one", + "I",1:32, - 3,"two", - "I",2:32 >>), + 3,"two", + "I",2:32 >>), case catch rabbit_binary_parser:parse_properties([bit, bit, bit, bit], <<16#A0,0,1>>) of {'EXIT', content_properties_binary_overflow} -> passed; V -> exit({got_success_but_expected_failure, V}) @@ -480,28 +480,28 @@ test_field_values() -> ]}], << - % property-flags - 16#8000:16, - % table length in bytes - 228:32, - - 7,"longstr", "S", 21:32, "Here is a long string", % = 34 - 9,"signedint", "I", 12345:32/signed, % + 15 = 49 - 7,"decimal", "D", 3, 123456:32, % + 14 = 63 - 9,"timestamp", "T", 109876543209876:64, % + 19 = 82 - 5,"table", "F", 31:32, % length of table % + 11 = 93 - 3,"one", "I", 54321:32, % + 9 = 102 - 3,"two", "S", 13:32, "A long string",% + 22 = 124 - 4,"byte", "b", 255:8, % + 7 = 131 - 4,"long", "l", 1234567890:64, % + 14 = 145 - 5,"short", "s", 655:16, % + 9 = 154 - 4,"bool", "t", 1, % + 7 = 161 - 6,"binary", "x", 15:32, "a binary string", % + 27 = 188 - 4,"void", "V", % + 6 = 194 - 5,"array", "A", 23:32, % + 11 = 205 - "I", 54321:32, % + 5 = 210 - "S", 13:32, "A long string" % + 18 = 228 - >>), + %% property-flags + 16#8000:16, + %% table length in bytes + 228:32, + + 7,"longstr", "S", 21:32, "Here is a long string", % = 34 + 9,"signedint", "I", 12345:32/signed, % + 15 = 49 + 7,"decimal", "D", 3, 123456:32, % + 14 = 63 + 9,"timestamp", "T", 109876543209876:64, % + 19 = 82 + 5,"table", "F", 31:32, % length of table % + 11 = 93 + 3,"one", "I", 54321:32, % + 9 = 102 + 3,"two", "S", 13:32, "A long string", % + 22 = 124 + 4,"byte", "b", 255:8, % + 7 = 131 + 4,"long", "l", 1234567890:64, % + 14 = 145 + 5,"short", "s", 655:16, % + 9 = 154 + 4,"bool", "t", 1, % + 7 = 161 + 6,"binary", "x", 15:32, "a binary string", % + 27 = 188 + 4,"void", "V", % + 6 = 194 + 5,"array", "A", 23:32, % + 11 = 205 + "I", 54321:32, % + 5 = 210 + "S", 13:32, "A long string" % + 18 = 228 + >>), passed. 
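Aside (not part of the patch): the running byte counts in the margin comments of test_field_values above can be reproduced mechanically. For instance the first entry, 7,"longstr", "S", 21:32, "Here is a long string", is 1 (key length) + 7 (key) + 1 (type octet) + 4 (value length) + 21 (value) = 34 bytes, matching the "% = 34" annotation. A throwaway check of just that entry (module and helper invented here):

-module(field_value_size_sketch).
-export([check/0]).

%% key-length byte, key, type octet "S", 32-bit value length, value bytes
longstr_entry(Key, Value) ->
    <<(byte_size(Key)):8, Key/binary, "S", (byte_size(Value)):32, Value/binary>>.

check() ->
    34 = byte_size(longstr_entry(<<"longstr">>, <<"Here is a long string">>)).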
%% Test that content frames don't exceed frame-max @@ -598,65 +598,65 @@ test_topic_matching() -> %% add some bindings Bindings = lists:map( - fun ({Key, Q}) -> - #binding{source = XName, - key = list_to_binary(Key), - destination = #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)}} - end, [{"a.b.c", "t1"}, - {"a.*.c", "t2"}, - {"a.#.b", "t3"}, - {"a.b.b.c", "t4"}, - {"#", "t5"}, - {"#.#", "t6"}, - {"#.b", "t7"}, - {"*.*", "t8"}, - {"a.*", "t9"}, - {"*.b.c", "t10"}, - {"a.#", "t11"}, - {"a.#.#", "t12"}, - {"b.b.c", "t13"}, - {"a.b.b", "t14"}, - {"a.b", "t15"}, - {"b.c", "t16"}, - {"", "t17"}, - {"*.*.*", "t18"}, - {"vodka.martini", "t19"}, - {"a.b.c", "t20"}, - {"*.#", "t21"}, - {"#.*.#", "t22"}, - {"*.#.#", "t23"}, - {"#.#.#", "t24"}, - {"*", "t25"}, - {"#.b.#", "t26"}]), + fun ({Key, Q}) -> + #binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} + end, [{"a.b.c", "t1"}, + {"a.*.c", "t2"}, + {"a.#.b", "t3"}, + {"a.b.b.c", "t4"}, + {"#", "t5"}, + {"#.#", "t6"}, + {"#.b", "t7"}, + {"*.*", "t8"}, + {"a.*", "t9"}, + {"*.b.c", "t10"}, + {"a.#", "t11"}, + {"a.#.#", "t12"}, + {"b.b.c", "t13"}, + {"a.b.b", "t14"}, + {"a.b", "t15"}, + {"b.c", "t16"}, + {"", "t17"}, + {"*.*.*", "t18"}, + {"vodka.martini", "t19"}, + {"a.b.c", "t20"}, + {"*.#", "t21"}, + {"#.*.#", "t22"}, + {"*.#.#", "t23"}, + {"#.#.#", "t24"}, + {"*", "t25"}, + {"#.b.#", "t26"}]), lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), %% test some matches - test_topic_expect_match(X, - [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", - "t18", "t20", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", - "t12", "t15", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", - "t18", "t21", "t22", "t23", "t24", "t26"]}, - {"", ["t5", "t6", "t17", "t24"]}, - {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", "t24", - "t26"]}, - {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", "t23", - "t24"]}, - {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", - "t24"]}, - {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", - "t24"]}, - {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", "t22", - "t23", "t24", "t26"]}, - {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, - {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", - "t25"]}]), + test_topic_expect_match( + X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12", + "t18", "t20", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11", + "t12", "t15", "t21", "t22", "t23", "t24", + "t26"]}, + {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14", + "t18", "t21", "t22", "t23", "t24", "t26"]}, + {"", ["t5", "t6", "t17", "t24"]}, + {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23", + "t24", "t26"]}, + {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22", + "t23", "t24"]}, + {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23", + "t24"]}, + {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23", + "t24"]}, + {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21", + "t22", "t23", "t24", "t26"]}, + {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]}, + {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24", + "t25"]}]), %% remove some bindings RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings), @@ -669,21 +669,21 @@ test_topic_matching() -> %% test some 
matches test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", - "t23", "t24", "t26"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", - "t22", "t23", "t24", "t26"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", - "t23", "t24", "t26"]}, - {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, - {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", - "t24", "t26"]}, - {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, - {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange exchange_op_callback(X, delete, [RemainingBindings]), @@ -876,22 +876,22 @@ test_log_management_during_startup() -> passed. test_option_parser() -> - % command and arguments should just pass through + %% command and arguments should just pass through ok = check_get_options({["mock_command", "arg1", "arg2"], []}, [], ["mock_command", "arg1", "arg2"]), - % get flags + %% get flags ok = check_get_options( {["mock_command", "arg1"], [{"-f", true}, {"-f2", false}]}, [{flag, "-f"}, {flag, "-f2"}], ["mock_command", "arg1", "-f"]), - % get options + %% get options ok = check_get_options( {["mock_command"], [{"-foo", "bar"}, {"-baz", "notbaz"}]}, [{option, "-foo", "notfoo"}, {option, "-baz", "notbaz"}], ["mock_command", "-foo", "bar"]), - % shuffled and interleaved arguments and options + %% shuffled and interleaved arguments and options ok = check_get_options( {["a1", "a2", "a3"], [{"-o1", "hello"}, {"-o2", "noto2"}, {"-f", true}]}, [{option, "-o1", "noto1"}, {flag, "-f"}, {option, "-o2", "noto2"}], @@ -1438,7 +1438,7 @@ test_declare_on_dead_queue(SecondaryNode) -> throw(failed_to_create_and_kill_queue) end. -%--------------------------------------------------------------------- +%%--------------------------------------------------------------------- control_action(Command, Args) -> control_action(Command, node(), Args, default_options()). diff --git a/src/rabbit_writer.erl b/src/rabbit_writer.erl index eba86a55..ac3434d2 100644 --- a/src/rabbit_writer.erl +++ b/src/rabbit_writer.erl @@ -28,7 +28,7 @@ -define(HIBERNATE_AFTER, 5000). -%%---------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- -ifdef(use_specs). @@ -69,7 +69,7 @@ -endif. 
-%%---------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- start(Sock, Channel, FrameMax, Protocol, ReaderPid) -> {ok, @@ -133,7 +133,7 @@ handle_message({inet_reply, _, Status}, _State) -> handle_message(Message, _State) -> exit({writer, message_not_understood, Message}). -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- send_command(W, MethodRecord) -> W ! {send_command, MethodRecord}, @@ -157,13 +157,13 @@ send_command_and_notify(W, Q, ChPid, MethodRecord, Content) -> W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content}, ok. -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- call(Pid, Msg) -> {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity), Res. -%--------------------------------------------------------------------------- +%%--------------------------------------------------------------------------- assemble_frame(Channel, MethodRecord, Protocol) -> ?LOGMESSAGE(out, Channel, MethodRecord, none), -- cgit v1.2.1 From c8044c53b6a8eed5b685ff263b4ffbcba37a98c7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 20:31:09 +0000 Subject: cosmetic --- src/rabbit_amqqueue.erl | 6 ++--- src/rabbit_channel.erl | 14 +++++------ src/rabbit_control.erl | 46 +++++++++++++++++------------------- src/rabbit_misc.erl | 22 +++++++++--------- src/rabbit_msg_file.erl | 50 +++++++++++++++++++-------------------- src/rabbit_msg_store.erl | 54 +++++++++++++++++++++---------------------- src/rabbit_queue_index.erl | 18 +++++++-------- src/rabbit_reader.erl | 19 ++++++++------- src/rabbit_router.erl | 6 ++--- src/rabbit_variable_queue.erl | 33 +++++++++++++------------- 10 files changed, 131 insertions(+), 137 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 7a996a98..8e4ca8e3 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -214,8 +214,8 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> [] -> ok = store_queue(Q), B = add_default_binding(Q), fun (Tx) -> B(Tx), Q end; - [_] -> %% Q exists on stopped node - rabbit_misc:const(not_found) + %% Q exists on stopped node + [_] -> rabbit_misc:const(not_found) end; [ExistingQ = #amqqueue{pid = QPid}] -> case rabbit_misc:is_process_alive(QPid) of @@ -288,7 +288,7 @@ with_exclusive_access_or_die(Name, ReaderPid, F) -> fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end). assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, - RequiredArgs) -> + RequiredArgs) -> rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName, [<<"x-expires">>]). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5fccb542..526fb428 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -301,8 +301,8 @@ handle_info({'DOWN', _MRef, process, QPid, Reason}, {MXs, State2} = process_confirms(MsgSeqNos, QPid, State1), erase_queue_stats(QPid), State3 = (case Reason of - normal -> fun record_confirms/2; - _ -> fun send_nacks/2 + normal -> fun record_confirms/2; + _ -> fun send_nacks/2 end)(MXs, State2), noreply(queue_blocked(QPid, State3)). 
@@ -715,9 +715,9 @@ handle_method(#'basic.consume'{queue = QueueNameBin, end) of ok -> {noreply, State#ch{consumer_mapping = - dict:store(ActualConsumerTag, - QueueName, - ConsumerMapping)}}; + dict:store(ActualConsumerTag, + QueueName, + ConsumerMapping)}}; {error, exclusive_consume_unavailable} -> rabbit_misc:protocol_error( access_refused, "~s in exclusive use", @@ -739,8 +739,8 @@ handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, return_ok(State, NoWait, OkMsg); {ok, QueueName} -> NewState = State#ch{consumer_mapping = - dict:erase(ConsumerTag, - ConsumerMapping)}, + dict:erase(ConsumerTag, + ConsumerMapping)}, case rabbit_amqqueue:with( QueueName, fun (Q) -> diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 746bb66e..8364ecd8 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -103,24 +103,22 @@ print_badrpc_diagnostics(Node) -> diagnostics(Node) -> {_NodeName, NodeHost} = rabbit_misc:nodeparts(Node), - [ - {"diagnostics:", []}, - case net_adm:names(NodeHost) of - {error, EpmdReason} -> - {"- unable to connect to epmd on ~s: ~w", - [NodeHost, EpmdReason]}; - {ok, NamePorts} -> - {"- nodes and their ports on ~s: ~p", - [NodeHost, [{list_to_atom(Name), Port} || - {Name, Port} <- NamePorts]]} - end, - {"- current node: ~w", [node()]}, - case init:get_argument(home) of - {ok, [[Home]]} -> {"- current node home dir: ~s", [Home]}; - Other -> {"- no current node home dir: ~p", [Other]} - end, - {"- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]} - ]. + [{"diagnostics:", []}, + case net_adm:names(NodeHost) of + {error, EpmdReason} -> + {"- unable to connect to epmd on ~s: ~w", + [NodeHost, EpmdReason]}; + {ok, NamePorts} -> + {"- nodes and their ports on ~s: ~p", + [NodeHost, [{list_to_atom(Name), Port} || + {Name, Port} <- NamePorts]]} + end, + {"- current node: ~w", [node()]}, + case init:get_argument(home) of + {ok, [[Home]]} -> {"- current node home dir: ~s", [Home]}; + Other -> {"- no current node home dir: ~p", [Other]} + end, + {"- current node cookie hash: ~s", [rabbit_misc:cookie_hash()]}]. stop() -> ok. @@ -152,13 +150,13 @@ action(force_reset, Node, [], _Opts, Inform) -> action(cluster, Node, ClusterNodeSs, _Opts, Inform) -> ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), Inform("Clustering node ~p with ~p", - [Node, ClusterNodes]), + [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, cluster, [ClusterNodes]); action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> ClusterNodes = lists:map(fun list_to_atom/1, ClusterNodeSs), Inform("Forcefully clustering node ~p with ~p (ignoring offline nodes)", - [Node, ClusterNodes]), + [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]); action(status, Node, [], _Opts, Inform) -> @@ -320,10 +318,8 @@ wait_for_application0(Node, Attempts) -> wait_for_application(Node, Attempts). default_if_empty(List, Default) when is_list(List) -> - if List == [] -> - Default; - true -> - [list_to_atom(X) || X <- List] + if List == [] -> Default; + true -> [list_to_atom(X) || X <- List] end. display_info_list(Results, InfoItemKeys) when is_list(Results) -> @@ -414,7 +410,7 @@ prettify_typed_amqp_value(Type, Value) -> _ -> Value end. 
-% the slower shutdown on windows required to flush stdout +%% the slower shutdown on windows required to flush stdout quit(Status) -> case os:type() of {unix, _} -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 5579dbab..e79a58a1 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -469,11 +469,11 @@ map_in_order(F, L) -> table_fold(F, Acc0, TableName) -> lists:foldl( fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, read) of - [] -> Acc; - _ -> F(E, Acc) - end - end) + fun () -> case mnesia:match_object(TableName, E, read) of + [] -> Acc; + _ -> F(E, Acc) + end + end) end, Acc0, dirty_read_all(TableName)). dirty_read_all(TableName) -> @@ -755,12 +755,12 @@ unlink_and_capture_exit(Pid) -> after 0 -> ok end. -% Separate flags and options from arguments. -% get_options([{flag, "-q"}, {option, "-p", "/"}], -% ["set_permissions","-p","/","guest", -% "-q",".*",".*",".*"]) -% == {["set_permissions","guest",".*",".*",".*"], -% [{"-q",true},{"-p","/"}]} +%% Separate flags and options from arguments. +%% get_options([{flag, "-q"}, {option, "-p", "/"}], +%% ["set_permissions","-p","/","guest", +%% "-q",".*",".*",".*"]) +%% == {["set_permissions","guest",".*",".*",".*"], +%% [{"-q",true},{"-p","/"}]} get_options(Defs, As) -> lists:foldl(fun(Def, {AsIn, RsIn}) -> {AsOut, Value} = case Def of diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl index 4b97d74c..ea7cf80c 100644 --- a/src/rabbit_msg_file.erl +++ b/src/rabbit_msg_file.erl @@ -60,9 +60,9 @@ append(FileHdl, Guid, MsgBody) Size = MsgBodyBinSize + ?GUID_SIZE_BYTES, case file_handle_cache:append(FileHdl, <>) of + Guid:?GUID_SIZE_BYTES/binary, + MsgBodyBin:MsgBodyBinSize/binary, + ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT}; KO -> KO end. @@ -72,9 +72,9 @@ read(FileHdl, TotalSize) -> BodyBinSize = Size - ?GUID_SIZE_BYTES, case file_handle_cache:read(FileHdl, TotalSize) of {ok, <>} -> + Guid:?GUID_SIZE_BYTES/binary, + MsgBodyBin:BodyBinSize/binary, + ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} -> {ok, {Guid, binary_to_term(MsgBodyBin)}}; KO -> KO end. @@ -97,26 +97,26 @@ scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) -> end. scanner(<<>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; + {<<>>, Acc, Offset}; scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) -> - {<<>>, Acc, Offset}; %% Nothing to do other than stop. + {<<>>, Acc, Offset}; %% Nothing to do other than stop. scanner(<>, Offset, Fun, Acc) -> - TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, - case WriteMarker of - ?WRITE_OK_MARKER -> - %% Here we take option 5 from - %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in - %% which we read the Guid as a number, and then convert it - %% back to a binary in order to work around bugs in - %% Erlang's GC. - <> = - <>, - <> = <>, - scanner(Rest, Offset + TotalSize, Fun, - Fun({Guid, TotalSize, Offset, Msg}, Acc)); - _ -> - scanner(Rest, Offset + TotalSize, Fun, Acc) - end; + TotalSize = Size + ?FILE_PACKING_ADJUSTMENT, + case WriteMarker of + ?WRITE_OK_MARKER -> + %% Here we take option 5 from + %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in + %% which we read the Guid as a number, and then convert it + %% back to a binary in order to work around bugs in + %% Erlang's GC. 
+ <> = + <>, + <> = <>, + scanner(Rest, Offset + TotalSize, Fun, + Fun({Guid, TotalSize, Offset, Msg}, Acc)); + _ -> + scanner(Rest, Offset + TotalSize, Fun, Acc) + end; scanner(Data, Offset, _Fun, Acc) -> - {Data, Acc, Offset}. + {Data, Acc, Offset}. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index d1b8f707..8e1b2ac4 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -549,7 +549,7 @@ client_read3(#msg_location { guid = Guid, file = File }, Defer, %% GC ends, we +1 readers, msg_store ets:deletes (and %% unlocks the dest) try Release(), - Defer() + Defer() catch error:badarg -> read(Guid, CState) end; [#file_summary { locked = false }] -> @@ -667,7 +667,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> successfully_recovered = CleanShutdown, file_size_limit = FileSizeLimit, cref_to_guids = dict:new() - }, + }, %% If we didn't recover the msg location index then we need to %% rebuild it now. @@ -1256,7 +1256,7 @@ safe_file_delete(File, Dir, FileHandlesEts) -> close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts, client_ref = Ref } = - CState) -> + CState) -> Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}), {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) -> true = ets:delete(FileHandlesEts, Key), @@ -1465,7 +1465,7 @@ recover_file_summary(true, Dir) -> Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME), case ets:file2tab(Path) of {ok, Tid} -> file:delete(Path), - {true, Tid}; + {true, Tid}; {error, _Error} -> recover_file_summary(false, Dir) end. @@ -1530,7 +1530,7 @@ scan_file_for_valid_messages(Dir, FileName) -> {ok, Hdl} -> Valid = rabbit_msg_file:scan( Hdl, filelib:file_size( form_filename(Dir, FileName)), - fun scan_fun/2, []), + fun scan_fun/2, []), %% if something really bad has happened, %% the close could fail, but ignore file_handle_cache:close(Hdl), @@ -1693,8 +1693,8 @@ maybe_compact(State = #msstate { sum_valid_data = SumValid, pending_gc_completion = Pending, file_summary_ets = FileSummaryEts, file_size_limit = FileSizeLimit }) - when (SumFileSize > 2 * FileSizeLimit andalso - (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION) -> + when SumFileSize > 2 * FileSizeLimit andalso + (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION -> %% TODO: the algorithm here is sub-optimal - it may result in a %% complete traversal of FileSummaryEts. case ets:first(FileSummaryEts) of @@ -1757,10 +1757,10 @@ delete_file_if_empty(File, State = #msstate { locked = false }] = ets:lookup(FileSummaryEts, File), case ValidData of - 0 -> %% don't delete the file_summary_ets entry for File here - %% because we could have readers which need to be able to - %% decrement the readers count. - true = ets:update_element(FileSummaryEts, File, + %% don't delete the file_summary_ets entry for File here + %% because we could have readers which need to be able to + %% decrement the readers count. 
+ 0 -> true = ets:update_element(FileSummaryEts, File, {#file_summary.locked, true}), ok = rabbit_msg_store_gc:delete(GCPid, File), Pending1 = orddict_store(File, [], Pending), @@ -1813,17 +1813,17 @@ combine_files(Source, Destination, dir = Dir, msg_store = Server }) -> [#file_summary { - readers = 0, - left = Destination, - valid_total_size = SourceValid, - file_size = SourceFileSize, - locked = true }] = ets:lookup(FileSummaryEts, Source), + readers = 0, + left = Destination, + valid_total_size = SourceValid, + file_size = SourceFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Source), [#file_summary { - readers = 0, - right = Source, - valid_total_size = DestinationValid, - file_size = DestinationFileSize, - locked = true }] = ets:lookup(FileSummaryEts, Destination), + readers = 0, + right = Source, + valid_total_size = DestinationValid, + file_size = DestinationFileSize, + locked = true }] = ets:lookup(FileSummaryEts, Destination), SourceName = filenum_to_name(Source), DestinationName = filenum_to_name(Destination), @@ -2001,12 +2001,12 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> ?HANDLE_CACHE_BUFFER_SIZE}]), {ok, _Acc, _IgnoreSize} = rabbit_msg_file:scan( - RefOld, filelib:file_size(FileOld), - fun({Guid, _Size, _Offset, BinMsg}, ok) -> - {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), - {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), - ok - end, ok), + RefOld, filelib:file_size(FileOld), + fun({Guid, _Size, _Offset, BinMsg}, ok) -> + {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, _} = rabbit_msg_file:append(RefNew, Guid, MsgNew), + ok + end, ok), file_handle_cache:close(RefOld), file_handle_cache:close(RefNew), ok. diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index bc329947..00f5a752 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -272,7 +272,7 @@ publish(Guid, SeqId, MsgProps, IsPersistent, false -> ?PUB_TRANS_JPREFIX end):?JPREFIX_BITS, SeqId:?SEQ_BITS>>, - create_pub_record_body(Guid, MsgProps)]), + create_pub_record_body(Guid, MsgProps)]), maybe_flush_journal( add_to_journal(SeqId, {Guid, MsgProps, IsPersistent}, State1)). @@ -666,8 +666,8 @@ recover_journal(State) -> journal_minus_segment(JEntries, SegEntries), Segment #segment { journal_entries = JEntries1, unacked = (UnackedCountInJournal + - UnackedCountInSeg - - UnackedCountDuplicates) } + UnackedCountInSeg - + UnackedCountDuplicates) } end, Segments), State1 #qistate { segments = Segments1 }. 
@@ -799,16 +799,16 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> {Guid, MsgProps, IsPersistent} -> file_handle_cache:append( Hdl, [<>, - create_pub_record_body(Guid, MsgProps)]) + (bool_to_int(IsPersistent)):1, + RelSeq:?REL_SEQ_BITS>>, + create_pub_record_body(Guid, MsgProps)]) end, ok = case {Del, Ack} of {no_del, no_ack} -> ok; _ -> Binary = <>, + RelSeq:?REL_SEQ_BITS>>, file_handle_cache:append( Hdl, case {Del, Ack} of {del, ack} -> [Binary, Binary]; @@ -853,14 +853,14 @@ load_segment(KeepAcked, #segment { path = Path }) -> load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of {ok, <>} -> + IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>} -> {Guid, MsgProps} = read_pub_record_body(Hdl), Obj = {{Guid, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), load_segment_entries(KeepAcked, Hdl, SegEntries1, UnackedCount + 1); {ok, <>} -> + RelSeq:?REL_SEQ_BITS>>} -> {UnackedCountDelta, SegEntries1} = case array:get(RelSeq, SegEntries) of {Pub, no_del, no_ack} -> diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index f9a3d9c7..710e6878 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -592,14 +592,14 @@ handle_method0(MethodName, FieldsBin, State = #v1{connection = #connection{protocol = Protocol}}) -> HandleException = fun(R) -> - case ?IS_RUNNING(State) of - true -> send_exception(State, 0, R); - %% We don't trust the client at this point - force - %% them to wait for a bit so they can't DOS us with - %% repeated failed logins etc. - false -> timer:sleep(?SILENT_CLOSE_DELAY * 1000), - throw({channel0_error, State#v1.connection_state, R}) - end + case ?IS_RUNNING(State) of + true -> send_exception(State, 0, R); + %% We don't trust the client at this point - force + %% them to wait for a bit so they can't DOS us with + %% repeated failed logins etc. + false -> timer:sleep(?SILENT_CLOSE_DELAY * 1000), + throw({channel0_error, State#v1.connection_state, R}) + end end, try handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin), @@ -734,8 +734,7 @@ auth_mechanisms(Sock) -> auth_mechanisms_binary(Sock) -> list_to_binary( - string:join( - [atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). + string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")). auth_phase(Response, State = #v1{auth_mechanism = AuthMechanism, diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 53e707f4..f6a1c92f 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -59,7 +59,7 @@ deliver(QNames, Delivery = #delivery{mandatory = false, {routed, QPids}; deliver(QNames, Delivery = #delivery{mandatory = Mandatory, - immediate = Immediate}) -> + immediate = Immediate}) -> QPids = lookup_qpids(QNames), {Success, _} = delegate:invoke(QPids, @@ -67,7 +67,7 @@ deliver(QNames, Delivery = #delivery{mandatory = Mandatory, rabbit_amqqueue:deliver(Pid, Delivery) end), {Routed, Handled} = - lists:foldl(fun fold_deliveries/2, {false, []}, Success), + lists:foldl(fun fold_deliveries/2, {false, []}, Success), check_delivery(Mandatory, Immediate, {Routed, Handled}). 
@@ -91,7 +91,7 @@ match_routing_key(SrcName, [RoutingKey]) -> mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]); match_routing_key(SrcName, [_|_] = RoutingKeys) -> Condition = list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || - RKey <- RoutingKeys]]), + RKey <- RoutingKeys]]), MatchHead = #route{binding = #binding{source = SrcName, destination = '$1', key = '$2', diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 6a461a77..07f31a3a 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -510,8 +510,7 @@ publish(Msg, MsgProps, State) -> a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { guid = Guid }, - #message_properties { - needs_confirming = NeedsConfirming }, + #message_properties { needs_confirming = NeedsConfirming }, State = #vqstate { len = 0 }) -> case NeedsConfirming of true -> blind_confirm(self(), gb_sets:singleton(Guid)); @@ -632,12 +631,12 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { %% 3. If an ack is required, add something sensible to PA {AckTag, State1} = case AckRequired of - true -> StateN = record_pending_ack( - MsgStatus #msg_status { - is_delivered = true }, State), - {SeqId, StateN}; - false -> {undefined, State} - end, + true -> StateN = record_pending_ack( + MsgStatus #msg_status { + is_delivered = true }, State), + {SeqId, StateN}; + false -> {undefined, State} + end, PCount1 = PCount - one_if(IsPersistent andalso not AckRequired), Len1 = Len - 1, @@ -777,8 +776,8 @@ ram_duration(State = #vqstate { RamAckCount = gb_trees:size(RamAckIndex), Duration = %% msgs+acks / (msgs+acks/sec) == sec - case AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso - AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0 of + case (AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso + AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0) of true -> infinity; false -> (RamMsgCountPrev + RamMsgCount + RamAckCount + RamAckCountPrev) / @@ -1393,7 +1392,7 @@ accumulate_ack_init() -> {[], orddict:new()}. accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS msg_on_disk = false, index_on_disk = false }, - {PersistentSeqIdsAcc, GuidsByStore}) -> + {PersistentSeqIdsAcc, GuidsByStore}) -> {PersistentSeqIdsAcc, GuidsByStore}; accumulate_ack(SeqId, {IsPersistent, Guid, _MsgProps}, {PersistentSeqIdsAcc, GuidsByStore}) -> @@ -1817,12 +1816,12 @@ push_betas_to_deltas(Generator, Limit, Q, Count, RamIndexCount, IndexState) -> multiple_routing_keys() -> transform_storage( - fun ({basic_message, ExchangeName, Routing_Key, Content, - Guid, Persistent}) -> - {ok, {basic_message, ExchangeName, [Routing_Key], Content, - Guid, Persistent}}; - (_) -> {error, corrupt_message} - end), + fun ({basic_message, ExchangeName, Routing_Key, Content, + Guid, Persistent}) -> + {ok, {basic_message, ExchangeName, [Routing_Key], Content, + Guid, Persistent}}; + (_) -> {error, corrupt_message} + end), ok. -- cgit v1.2.1 From 0e40c5131cf79c123b9eb85100bedebaa218df45 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 4 Mar 2011 20:42:51 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 2 +- src/rabbit_queue_index.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 48fce9ed..4f5d2411 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -150,7 +150,7 @@ -spec(client_ref/1 :: (client_msstate()) -> client_ref()). -spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'). 
-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) -> - {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). + {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). -spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). -spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). -spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 59d87654..8227e4cd 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -214,7 +214,7 @@ boolean(), boolean()}], qistate()}). -spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()). -spec(bounds/1 :: (qistate()) -> - {non_neg_integer(), non_neg_integer(), qistate()}). + {non_neg_integer(), non_neg_integer(), qistate()}). -spec(recover/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}). -spec(add_queue_ttl/0 :: () -> 'ok'). -- cgit v1.2.1 From 40d08e7806c1980d428cd3065f71faa08e7239a9 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 00:31:49 +0000 Subject: make handling of confirms more obvious in BQ API and fix some bugs introduced earlier ...amazingly it all seems to work now --- include/rabbit_backing_queue_spec.hrl | 1 + src/rabbit_amqqueue_process.erl | 31 ++++++++++----------- src/rabbit_backing_queue.erl | 4 +++ src/rabbit_variable_queue.erl | 51 +++++++++++++++++++---------------- 4 files changed, 49 insertions(+), 38 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 2e4d1b0a..b2bf6bbb 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -43,6 +43,7 @@ (false, rabbit_types:basic_message(), rabbit_types:message_properties(), state()) -> {undefined, state()}). +-spec(drain_confirmed/1 :: (state()) -> {[rabbit_guid:guid()], state()}). -spec(dropwhile/2 :: (fun ((rabbit_types:message_properties()) -> boolean()), state()) -> state()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 069b803e..4d8b936a 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -215,13 +215,15 @@ noreply(NewState) -> {NewState1, Timeout} = next_state(NewState), {noreply, NewState1, Timeout}. -next_state(State) -> - State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = - ensure_rate_timer(State), - State2 = ensure_stats_timer(State1), - case BQ:needs_idle_timeout(BQS) of - true -> {ensure_sync_timer(State2), 0}; - false -> {stop_sync_timer(State2), hibernate} +next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> + {Guids, BQS1} = BQ:drain_confirmed(BQS), + BQNeedsSync = BQ:needs_idle_timeout(BQS1), + State1 = ensure_stats_timer( + ensure_rate_timer( + confirm_messages(Guids, State#q{backing_queue_state = BQS1}))), + case BQNeedsSync of + true -> {ensure_sync_timer(State1), 0}; + false -> {stop_sync_timer(State1), hibernate} end. ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> @@ -418,6 +420,8 @@ deliver_from_queue_deliver(AckRequired, false, State) -> fetch(AckRequired, State), {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. 
+confirm_messages([], State) -> + State; confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> {CMs, GTC1} = lists:foldl( @@ -523,9 +527,8 @@ deliver_or_enqueue(Delivery, State) -> requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> maybe_run_queue_via_backing_queue( - fun (BQS) -> - {[], BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS)} - end, State). + fun (BQS) -> BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) end, + State). fetch(AckRequired, State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> @@ -628,13 +631,11 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - maybe_run_queue_via_backing_queue( - fun (BQS) -> {[], BQ:idle_timeout(BQS)} end, State). + maybe_run_queue_via_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, + State). maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - {Guids, BQS1} = Fun(BQS), - run_message_queue( - confirm_messages(Guids, State#q{backing_queue_state = BQS1})). + run_message_queue(State#q{backing_queue_state = Fun(BQS)}). commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, State = #q{backing_queue = BQ, diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index a8e201ea..b06f1e9c 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -54,6 +54,10 @@ behaviour_info(callbacks) -> %% (i.e. saves the round trip through the backing queue). {publish_delivered, 4}, + %% Return ids of messages which have been confirmed since + %% the last invocation of this function (or initialisation). + {drain_confirmed, 1}, + %% Drop messages from the head of the queue while the supplied %% predicate returns true. {dropwhile, 2}, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 67c4cc3c..eca3d8d3 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -17,8 +17,8 @@ -module(rabbit_variable_queue). -export([init/5, terminate/1, delete_and_terminate/1, - purge/1, publish/3, publish_delivered/4, fetch/2, ack/2, - tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, + purge/1, publish/3, publish_delivered/4, drain_confirmed/1, + fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, @@ -255,6 +255,7 @@ msgs_on_disk, msg_indices_on_disk, unconfirmed, + confirmed, ack_out_counter, ack_in_counter, ack_rates @@ -353,6 +354,7 @@ msgs_on_disk :: gb_set(), msg_indices_on_disk :: gb_set(), unconfirmed :: gb_set(), + confirmed :: gb_set(), ack_out_counter :: non_neg_integer(), ack_in_counter :: non_neg_integer(), ack_rates :: rates() }). @@ -443,8 +445,8 @@ init(QueueName, true, true, AsyncCallback, SyncCallback, rabbit_msg_store:contains(Guid, PersistentClient) end, MsgIdxOnDiskFun), - init(true, IndexState, DeltaCount, Terms1, - PersistentClient, TransientClient, AsyncCallback, SyncCallback). + init(true, IndexState, DeltaCount, Terms1, AsyncCallback, SyncCallback, + PersistentClient, TransientClient). terminate(State) -> State1 = #vqstate { persistent_count = PCount, @@ -549,6 +551,9 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, persistent_count = PCount1, unconfirmed = UC1 }))}. 
+drain_confirmed(State = #vqstate { confirmed = C }) -> + {gb_sets:to_list(C), State #vqstate { confirmed = gb_sets:new() }}. + dropwhile(Pred, State) -> {_OkOrEmpty, State1} = dropwhile1(Pred, State), State1. @@ -981,7 +986,7 @@ msg_store_close_fds_fun(IsPersistent, Callback) -> fun (State = #vqstate { msg_store_clients = MSCState }) -> {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), - {[], State #vqstate { msg_store_clients = MSCState1 }} + State #vqstate { msg_store_clients = MSCState1 } end) end. @@ -1068,7 +1073,7 @@ update_rate(Now, Then, Count, {OThen, OCount}) -> %%---------------------------------------------------------------------------- init(IsDurable, IndexState, DeltaCount, Terms, - PersistentClient, TransientClient, AsyncCallback, SyncCallback) -> + AsyncCallback, SyncCallback, PersistentClient, TransientClient) -> {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), @@ -1111,6 +1116,7 @@ init(IsDurable, IndexState, DeltaCount, Terms, msgs_on_disk = gb_sets:new(), msg_indices_on_disk = gb_sets:new(), unconfirmed = gb_sets:new(), + confirmed = gb_sets:new(), ack_out_counter = 0, ack_in_counter = 0, ack_rates = blank_rate(Now, 0) }, @@ -1427,12 +1433,14 @@ confirm_commit_index(State = #vqstate { index_state = IndexState }) -> false -> State end. -remove_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, +record_confirms(GuidSet, State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> + unconfirmed = UC, + confirmed = C }) -> State #vqstate { msgs_on_disk = gb_sets:difference(MOD, GuidSet), msg_indices_on_disk = gb_sets:difference(MIOD, GuidSet), - unconfirmed = gb_sets:difference(UC, GuidSet) }. + unconfirmed = gb_sets:difference(UC, GuidSet), + confirmed = gb_sets:union (C, GuidSet) }. needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, unconfirmed = UC }) -> @@ -1449,11 +1457,8 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, %% subtraction. not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). -msgs_confirmed(GuidSet, State) -> - {gb_sets:to_list(GuidSet), remove_confirms(GuidSet, State)}. - blind_confirm(Callback, GuidSet) -> - Callback(fun (State) -> msgs_confirmed(GuidSet, State) end). + Callback(fun (State) -> record_confirms(GuidSet, State) end). msgs_written_to_disk(Callback, GuidSet, removed) -> blind_confirm(Callback, GuidSet); @@ -1461,22 +1466,22 @@ msgs_written_to_disk(Callback, GuidSet, written) -> Callback(fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MIOD), - State #vqstate { - msgs_on_disk = - gb_sets:union( - MOD, gb_sets:intersection(UC, GuidSet)) }) + record_confirms(gb_sets:intersection(GuidSet, MIOD), + State #vqstate { + msgs_on_disk = + gb_sets:union( + MOD, gb_sets:intersection(UC, GuidSet)) }) end). msg_indices_written_to_disk(Callback, GuidSet) -> Callback(fun (State = #vqstate { msgs_on_disk = MOD, msg_indices_on_disk = MIOD, unconfirmed = UC }) -> - msgs_confirmed(gb_sets:intersection(GuidSet, MOD), - State #vqstate { - msg_indices_on_disk = - gb_sets:union( - MIOD, gb_sets:intersection(UC, GuidSet)) }) + record_confirms(gb_sets:intersection(GuidSet, MOD), + State #vqstate { + msg_indices_on_disk = + gb_sets:union( + MIOD, gb_sets:intersection(UC, GuidSet)) }) end). 
%%---------------------------------------------------------------------------- -- cgit v1.2.1 From 15ea3055ad204a3dc44a9f3c559cb9428bcfe8c3 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sat, 5 Mar 2011 02:28:19 +0000 Subject: nack messages when the first queue dies --- src/rabbit_channel.erl | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 526fb428..e2437b8e 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -298,12 +298,13 @@ handle_info({'DOWN', _MRef, process, QPid, Reason}, %% process_confirms to prevent each MsgSeqNo being removed from %% the set one by one which which would be inefficient State1 = State#ch{unconfirmed_qm = gb_trees:delete_any(QPid, UQM)}, - {MXs, State2} = process_confirms(MsgSeqNos, QPid, State1), + {Nack, SendFun} = case Reason of + normal -> {false, fun record_confirms/2}; + _ -> {true, fun send_nacks/2} + end, + {MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1), erase_queue_stats(QPid), - State3 = (case Reason of - normal -> fun record_confirms/2; - _ -> fun send_nacks/2 - end)(MXs, State2), + State3 = SendFun(MXs, State2), noreply(queue_blocked(QPid, State3)). handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> @@ -513,23 +514,25 @@ record_confirms(MXs, State = #ch{confirmed = C}) -> confirm([], _QPid, State) -> State; confirm(MsgSeqNos, QPid, State) -> - {MXs, State1} = process_confirms(MsgSeqNos, QPid, State), + {MXs, State1} = process_confirms(MsgSeqNos, QPid, false, State), record_confirms(MXs, State1). -process_confirms(MsgSeqNos, QPid, State = #ch{unconfirmed_mq = UMQ, - unconfirmed_qm = UQM}) -> +process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ, + unconfirmed_qm = UQM}) -> {MXs, UMQ1, UQM1} = lists:foldl( - fun(MsgSeqNo, {_DMs, UMQ0, _UQM} = Acc) -> + fun(MsgSeqNo, {_MXs, UMQ0, _UQM} = Acc) -> case gb_trees:lookup(MsgSeqNo, UMQ0) of - {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, Acc, - State); - none -> Acc + {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, + Acc, Nack, State); + none -> + Acc end end, {[], UMQ, UQM}, MsgSeqNos), {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}. -remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, State) -> +remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack, + State) -> %% these confirms will be emitted even when a queue dies, but that %% should be fine, since the queue stats get erased immediately maybe_incr_stats([{{QPid, XName}, 1}], confirm, State), @@ -544,10 +547,12 @@ remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, State) -> UQM end, Qs1 = gb_sets:del_element(QPid, Qs), - case gb_sets:is_empty(Qs1) of - true -> + %% If QPid somehow died initiating a nack, clear the message from + %% internal data-structures. Also, cleanup empty entries. + Empty = gb_sets:is_empty(Qs1), + if (Empty orelse Nack) -> {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UMQ), UQM1}; - false -> + true -> {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ), UQM1} end. 
-- cgit v1.2.1 From 608ba6ef42d63b95c744d5d744ab1e4181f6ce45 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 08:53:23 +0000 Subject: cosmetic --- src/rabbit_amqqueue_process.erl | 101 +++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 57 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7719dfe7..24de9415 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -283,17 +283,16 @@ lookup_ch(ChPid) -> ch_record(ChPid) -> Key = {ch, ChPid}, case get(Key) of - undefined -> - MonitorRef = erlang:monitor(process, ChPid), - C = #cr{consumer_count = 0, - ch_pid = ChPid, - monitor_ref = MonitorRef, - acktags = sets:new(), - is_limit_active = false, - txn = none, - unsent_message_count = 0}, - put(Key, C), - C; + undefined -> MonitorRef = erlang:monitor(process, ChPid), + C = #cr{consumer_count = 0, + ch_pid = ChPid, + monitor_ref = MonitorRef, + acktags = sets:new(), + is_limit_active = false, + txn = none, + unsent_message_count = 0}, + put(Key, C), + C; C = #cr{} -> C end. @@ -319,18 +318,16 @@ erase_ch_record(#cr{ch_pid = ChPid, erase({ch, ChPid}), ok. -all_ch_record() -> - [C || {{ch, _}, C} <- get()]. +all_ch_record() -> [C || {{ch, _}, C} <- get()]. is_ch_blocked(#cr{unsent_message_count = Count, is_limit_active = Limited}) -> Limited orelse Count >= ?UNSENT_MESSAGE_LIMIT. ch_record_state_transition(OldCR, NewCR) -> - BlockedOld = is_ch_blocked(OldCR), - BlockedNew = is_ch_blocked(NewCR), - if BlockedOld andalso not(BlockedNew) -> unblock; - BlockedNew andalso not(BlockedOld) -> block; - true -> ok + case {is_ch_blocked(OldCR), is_ch_blocked(NewCR)} of + {true, false} -> unblock; + {false, true} -> block; + {_, _} -> ok end. deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, @@ -365,13 +362,12 @@ deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, case ch_record_state_transition(C, NewC) of ok -> {queue:in(QEntry, ActiveConsumersTail), BlockedConsumers}; - block -> - {ActiveConsumers1, BlockedConsumers1} = - move_consumers(ChPid, - ActiveConsumersTail, - BlockedConsumers), - {ActiveConsumers1, - queue:in(QEntry, BlockedConsumers1)} + block -> {ActiveConsumers1, BlockedConsumers1} = + move_consumers(ChPid, + ActiveConsumersTail, + BlockedConsumers), + {ActiveConsumers1, + queue:in(QEntry, BlockedConsumers1)} end, State2 = State1#q{ active_consumers = NewActiveConsumers, @@ -396,8 +392,7 @@ deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc, {FunAcc, State} end. -deliver_from_queue_pred(IsEmpty, _State) -> - not IsEmpty. +deliver_from_queue_pred(IsEmpty, _State) -> not IsEmpty. deliver_from_queue_deliver(AckRequired, false, State) -> {{Message, IsDelivered, AckTag, Remaining}, State1} = @@ -405,17 +400,16 @@ deliver_from_queue_deliver(AckRequired, false, State) -> {{Message, IsDelivered, AckTag}, 0 == Remaining, State1}. 
confirm_messages(Guids, State = #q{guid_to_channel = GTC}) -> - {CMs, GTC1} = - lists:foldl( - fun(Guid, {CMs, GTC0}) -> - case dict:find(Guid, GTC0) of - {ok, {ChPid, MsgSeqNo}} -> - {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, GTC0)}; - _ -> - {CMs, GTC0} - end - end, {gb_trees:empty(), GTC}, Guids), + {CMs, GTC1} = lists:foldl( + fun(Guid, {CMs, GTC0}) -> + case dict:find(Guid, GTC0) of + {ok, {ChPid, MsgSeqNo}} -> + {gb_trees_cons(ChPid, MsgSeqNo, CMs), + dict:erase(Guid, GTC0)}; + _ -> + {CMs, GTC0} + end + end, {gb_trees:empty(), GTC}, Guids), gb_trees:map(fun(ChPid, MsgSeqNos) -> rabbit_channel:confirm(ChPid, MsgSeqNos) end, CMs), @@ -480,17 +474,14 @@ attempt_delivery(#delivery{txn = none, {Delivered, State1} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), {Delivered, NeedsConfirming, State1}; -attempt_delivery(#delivery{txn = Txn, +attempt_delivery(#delivery{txn = Txn, sender = ChPid, message = Message}, - {NeedsConfirming, - State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + {NeedsConfirming, State = #q{backing_queue = BQ, + backing_queue_state = BQS}}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), - {true, - NeedsConfirming, - State#q{backing_queue_state = - BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS)}}. + BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), + {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}. deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of @@ -661,9 +652,8 @@ drop_expired_messages(State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> Now = now_micros(), BQS1 = BQ:dropwhile( - fun (#message_properties{expiry = Expiry}) -> - Now > Expiry - end, BQS), + fun (#message_properties{expiry = Expiry}) -> Now > Expiry end, + BQS), ensure_ttl_timer(State#q{backing_queue_state = BQS1}). ensure_ttl_timer(State = #q{backing_queue = BQ, @@ -814,8 +804,7 @@ handle_call({info, Items}, _From, State) -> handle_call(consumers, _From, State) -> reply(consumers(State), State); -handle_call({deliver_immediately, Delivery}, - _From, State) -> +handle_call({deliver_immediately, Delivery}, _From, State) -> %% Synchronous, "immediate" delivery mode %% %% FIXME: Is this correct semantics? @@ -906,15 +895,13 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid, case is_ch_blocked(C) of true -> State1#q{ blocked_consumers = - add_consumer( - ChPid, Consumer, - State1#q.blocked_consumers)}; + add_consumer(ChPid, Consumer, + State1#q.blocked_consumers)}; false -> run_message_queue( State1#q{ active_consumers = - add_consumer( - ChPid, Consumer, - State1#q.active_consumers)}) + add_consumer(ChPid, Consumer, + State1#q.active_consumers)}) end, emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, not NoAck), -- cgit v1.2.1 From b0e1d30b61c493e1a108842076376cbfea72040b Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 08:55:12 +0000 Subject: add missing assertion --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 07f31a3a..591e5a66 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -544,7 +544,7 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, dropwhile(Pred, State) -> {_OkOrEmpty, State1} = dropwhile1(Pred, State), - State1. + a(State1). 
dropwhile1(Pred, State) -> internal_queue_out( -- cgit v1.2.1 From d4fa5254102756b8af4f95822d04285766346f31 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 10:06:16 +0000 Subject: simplify various callback constructions --- src/rabbit_variable_queue.erl | 44 +++++++++++++++++++------------------------ 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 0b22d74e..08449013 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -706,11 +706,13 @@ tx_commit(Txn, Fun, MsgPropsFun, HasPersistentPubs = PersistentGuids =/= [], {AckTags1, a(case IsDurable andalso HasPersistentPubs of - true -> ok = msg_store_sync( - MSCState, true, PersistentGuids, - msg_store_callback(PersistentGuids, Pubs, AckTags1, - Fun, MsgPropsFun, - AsyncCallback, SyncCallback)), + true -> MsgStoreCallback = + fun () -> msg_store_callback( + PersistentGuids, Pubs, AckTags1, Fun, + MsgPropsFun, AsyncCallback, SyncCallback) + end, + ok = msg_store_sync(MSCState, true, PersistentGuids, + fun () -> spawn(MsgStoreCallback) end), State; false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, Fun, MsgPropsFun, State) @@ -947,9 +949,9 @@ msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> msg_store_client_init(MsgStore, rabbit_guid:guid(), MsgOnDiskFun, Callback). msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> + CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE), rabbit_msg_store:client_init( - MsgStore, Ref, MsgOnDiskFun, - msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE, Callback)). + MsgStore, Ref, MsgOnDiskFun, fun () -> Callback(CloseFDsFun) end). msg_store_write(MSCState, IsPersistent, Guid, Msg) -> with_immutable_msg_store_state( @@ -981,13 +983,10 @@ msg_store_close_fds(MSCState, IsPersistent) -> MSCState, IsPersistent, fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). -msg_store_close_fds_fun(IsPersistent, Callback) -> - fun () -> Callback( - fun (State = #vqstate { msg_store_clients = MSCState }) -> - {ok, MSCState1} = - msg_store_close_fds(MSCState, IsPersistent), - State #vqstate { msg_store_clients = MSCState1 } - end) +msg_store_close_fds_fun(IsPersistent) -> + fun (State = #vqstate { msg_store_clients = MSCState }) -> + {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), + State #vqstate { msg_store_clients = MSCState1 } end. maybe_write_delivered(false, _SeqId, IndexState) -> @@ -1131,17 +1130,12 @@ blank_rate(Timestamp, IngressLength) -> msg_store_callback(PersistentGuids, Pubs, AckTags, Fun, MsgPropsFun, AsyncCallback, SyncCallback) -> - fun () -> spawn(fun () -> case SyncCallback( - fun (StateN) -> - tx_commit_post_msg_store( - true, Pubs, AckTags, - Fun, MsgPropsFun, StateN) - end) of - ok -> ok; - error -> remove_persistent_messages( - PersistentGuids, AsyncCallback) - end - end) + case SyncCallback(fun (StateN) -> + tx_commit_post_msg_store(true, Pubs, AckTags, + Fun, MsgPropsFun, StateN) + end) of + ok -> ok; + error -> remove_persistent_messages(PersistentGuids, AsyncCallback) end. 
remove_persistent_messages(Guids, AsyncCallback) -> -- cgit v1.2.1 From 867bf496f0f3917bb3109b17464e7a3c5da20ae8 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 5 Mar 2011 10:48:38 +0000 Subject: shorten maybe_run_queue_via_backing_queue to something less misleading though arguably still quite obscure Also move make it clear in the amqqueue API which exports are genuine and which are for internal use only. --- src/rabbit_amqqueue.erl | 25 +++++++++-------- src/rabbit_amqqueue_process.erl | 61 ++++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 8e4ca8e3..0adaaa7f 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -17,23 +17,24 @@ -module(rabbit_amqqueue). -export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). --export([internal_declare/2, internal_delete/1, - maybe_run_queue_via_backing_queue/2, - maybe_run_queue_via_backing_queue_async/2, - sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, - set_maximum_since_use/2, maybe_expire/1, drop_expired/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, stat/1, deliver/2, requeue/3, ack/4, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). --export([emit_stats/1]). -export([consumers/1, consumers_all/1]). -export([basic_get/3, basic_consume/7, basic_cancel/4]). -export([notify_sent/2, unblock/2, flush_all/2]). -export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). -export([on_node_down/1]). +%% internal +-export([internal_declare/2, internal_delete/1, + run_backing_queue/2, run_backing_queue_async/2, + sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, + set_maximum_since_use/2, maybe_expire/1, drop_expired/1, + emit_stats/1]). + -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). @@ -140,9 +141,9 @@ rabbit_types:connection_exit() | fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). --spec(maybe_run_queue_via_backing_queue/2 :: +-spec(run_backing_queue/2 :: (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). --spec(maybe_run_queue_via_backing_queue_async/2 :: +-spec(run_backing_queue_async/2 :: (pid(), (fun ((A) -> {[rabbit_guid:guid()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). @@ -438,11 +439,11 @@ internal_delete(QueueName) -> end end). -maybe_run_queue_via_backing_queue(QPid, Fun) -> - gen_server2:call(QPid, {maybe_run_queue_via_backing_queue, Fun}, infinity). +run_backing_queue(QPid, Fun) -> + gen_server2:call(QPid, {run_backing_queue, Fun}, infinity). -maybe_run_queue_via_backing_queue_async(QPid, Fun) -> - gen_server2:cast(QPid, {maybe_run_queue_via_backing_queue, Fun}). +run_backing_queue_async(QPid, Fun) -> + gen_server2:cast(QPid, {run_backing_queue, Fun}). sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). 
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 460a97ce..55ee2ee3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -163,14 +163,14 @@ bq_init(BQ, QName, IsDurable, Recover) -> Self = self(), BQ:init(QName, IsDurable, Recover, fun (Fun) -> - rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - Self, Fun) + rabbit_amqqueue:run_backing_queue_async(Self, Fun) end, fun (Fun) -> rabbit_misc:with_exit_handler( fun () -> error end, - fun () -> rabbit_amqqueue:maybe_run_queue_via_backing_queue( - Self, Fun) end) + fun () -> + rabbit_amqqueue:run_backing_queue(Self, Fun) + end) end). process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> @@ -517,7 +517,7 @@ deliver_or_enqueue(Delivery, State) -> end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> - maybe_run_queue_via_backing_queue( + run_backing_queue( fun (BQS) -> BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) end, State). @@ -622,10 +622,9 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - maybe_run_queue_via_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, - State). + run_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, State). -maybe_run_queue_via_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> +run_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> run_message_queue(State#q{backing_queue_state = Fun(BQS)}). commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, @@ -756,29 +755,29 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {maybe_run_queue_via_backing_queue, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Fun} -> 6; + _ -> 0 end. prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - delete_immediately -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; - {ack, _Txn, _MsgIds, _ChPid} -> 7; - {reject, _MsgIds, _Requeue, _ChPid} -> 7; - {notify_sent, _ChPid} -> 7; - {unblock, _ChPid} -> 7; - {maybe_run_queue_via_backing_queue, _Fun} -> 6; - sync_timeout -> 6; - _ -> 0 + update_ram_duration -> 8; + delete_immediately -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + {ack, _Txn, _MsgIds, _ChPid} -> 7; + {reject, _MsgIds, _Requeue, _ChPid} -> 7; + {notify_sent, _ChPid} -> 7; + {unblock, _ChPid} -> 7; + {run_backing_queue, _Fun} -> 6; + sync_timeout -> 6; + _ -> 0 end. prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, @@ -991,12 +990,12 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({maybe_run_queue_via_backing_queue, Fun}, _From, State) -> - reply(ok, maybe_run_queue_via_backing_queue(Fun, State)). +handle_call({run_backing_queue, Fun}, _From, State) -> + reply(ok, run_backing_queue(Fun, State)). 
-handle_cast({maybe_run_queue_via_backing_queue, Fun}, State) -> - noreply(maybe_run_queue_via_backing_queue(Fun, State)); +handle_cast({run_backing_queue, Fun}, State) -> + noreply(run_backing_queue(Fun, State)); handle_cast(sync_timeout, State) -> noreply(backing_queue_idle_timeout(State#q{sync_timer_ref = undefined})); -- cgit v1.2.1 From 163d45122ee547c2361193939ca41f624b70f366 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sat, 5 Mar 2011 11:42:43 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e2437b8e..8afa2d8d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -525,8 +525,7 @@ process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ, case gb_trees:lookup(MsgSeqNo, UMQ0) of {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, Acc, Nack, State); - none -> - Acc + none -> Acc end end, {[], UMQ, UQM}, MsgSeqNos), {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}. -- cgit v1.2.1 From 282e6115095e9ed2a60c2b9f5858ff2db17d7d3a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Sun, 6 Mar 2011 12:45:40 +0000 Subject: add test for confirms in case of queue death There's a race in the test, but it seems to work reliably. I ran it 1000 times in isolation and it didn't fail. --- src/rabbit_tests.erl | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 88b58166..4ad35696 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -57,6 +57,7 @@ all_tests() -> passed = test_cluster_management(), passed = test_user_management(), passed = test_server_status(), + passed = test_confirms(), passed = maybe_run_cluster_dependent_tests(), passed = test_configurable_server_properties(), passed. @@ -1225,6 +1226,79 @@ test_statistics_receive_event1(Ch, Matcher) -> after 1000 -> throw(failed_to_receive_event) end. +test_confirms_receiver(Pid) -> + receive + shutdown -> + ok; + {send_command, Method} -> + Pid ! Method, + test_confirms_receiver(Pid) + end. + +test_confirms() -> + {_Writer, Ch} = test_spawn(fun test_confirms_receiver/1), + DeclareBindDurableQueue = + fun() -> + rabbit_channel:do(Ch, #'queue.declare'{durable = true}), + receive #'queue.declare_ok'{queue = Q0} -> + rabbit_channel:do(Ch, #'queue.bind'{ + queue = Q0, + exchange = <<"amq.direct">>, + routing_key = "magic" }), + receive #'queue.bind_ok'{} -> + Q0 + after 1000 -> + throw(failed_to_bind_queue) + end + after 1000 -> + throw(failed_to_declare_queue) + end + end, + %% Declare and bind two queues + QName1 = DeclareBindDurableQueue(), + QName2 = DeclareBindDurableQueue(), + %% Get the first one's pid (we'll crash it later) + {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)), + QPid1 = Q1#amqqueue.pid, + %% Enable confirms + rabbit_channel:do(Ch, #'confirm.select'{}), + receive #'confirm.select_ok'{} -> + ok + after 1000 -> + throw(failed_to_enable_confirms) + end, + %% Publish a message + rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, + routing_key = "magic" + }, + rabbit_basic:build_content( + #'P_basic'{delivery_mode = 2}, <<"">>)), + %% Crash the queue + QPid1 ! 
boom, + %% Wait for a nack + receive + #'basic.nack'{} -> + ok; + #'basic.ack'{} -> + throw(received_ack_instead_of_nack) + after 2000 -> + throw(did_not_receive_nack) + end, + receive + #'basic.ack'{} -> + throw(received_ack_when_none_expected) + after 1000 -> + ok + end, + %% Delete queue + rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), + receive #'queue.delete_ok'{} -> + ok + after 1000 -> + throw(failed_to_cleanup_queue) + end, + passed. + test_statistics() -> application:set_env(rabbit, collect_statistics, fine), -- cgit v1.2.1 From cee0c032110bfd427d854a0e78da1087c5f4bf28 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 11:00:17 +0000 Subject: Tweaked delete accumulator key --- src/rabbit_exchange_type_topic.erl | 60 ++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 98b223ff..7ff0808e 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -67,56 +67,56 @@ add_binding(true, _Exchange, Binding) -> add_binding(false, _Exchange, _Binding) -> ok. -remove_bindings(true, _X, Bs) -> +remove_bindings(true, X, Bs) -> {ToDelete, Paths} = lists:foldl( - fun(B = #binding{source = X, destination = D}, {Acc, PathAcc}) -> + fun(B = #binding{destination = D}, {Acc, PathAcc}) -> Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(X, Path, maybe_add_path( - X, Path, PathAcc)), - {[{X, FinalNode, D} | Acc], PathAcc1} + PathAcc1 = decrement_bindings(Path, + maybe_add_path(Path, PathAcc)), + {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), - [trie_remove_binding(X, FinalNode, D) || {X, FinalNode, D} <- ToDelete], + [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {{X, [{Node, W}, {Parent, _} | _ ]}, {0, 0}} + {[{Node, W}, {Parent, _} | _ ], {0, 0}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> ok. -maybe_add_path(_X, [{root, none}], PathAcc) -> +maybe_add_path([{root, none}], PathAcc) -> PathAcc; -maybe_add_path(X, Path, PathAcc) -> - case gb_trees:is_defined({X, Path}, PathAcc) of +maybe_add_path(Path, PathAcc) -> + case gb_trees:is_defined(Path, PathAcc) of true -> PathAcc; - false -> gb_trees:insert({X, Path}, counts(X, Path), PathAcc) + false -> gb_trees:insert(Path, counts(Path), PathAcc) end. -decrement_bindings(X, Path, PathAcc) -> +decrement_bindings(Path, PathAcc) -> with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, - X, Path, PathAcc). + Path, PathAcc). -decrement_edges(X, Path, PathAcc) -> +decrement_edges(Path, PathAcc) -> with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, - X, Path, PathAcc). + Path, PathAcc). -with_path_acc(_Fun, _X, [{root, none}], PathAcc) -> +with_path_acc(_Fun, [{root, none}], PathAcc) -> PathAcc; -with_path_acc(Fun, X, Path, PathAcc) -> - NewVal = Fun(gb_trees:get({X, Path}, PathAcc)), - NewPathAcc = gb_trees:update({X, Path}, NewVal, PathAcc), +with_path_acc(Fun, Path, PathAcc) -> + NewVal = Fun(gb_trees:get(Path, PathAcc)), + NewPathAcc = gb_trees:update(Path, NewVal, PathAcc), case NewVal of {0, 0} -> [_ | ParentPath] = Path, - decrement_edges(X, ParentPath, - maybe_add_path(X, ParentPath, NewPathAcc)); + decrement_edges(ParentPath, + maybe_add_path(ParentPath, NewPathAcc)); _ -> NewPathAcc end. -counts(X, [{FinalNode, _} | _]) -> - {trie_binding_count(X, FinalNode), trie_child_count(X, FinalNode)}. 
+counts([{FinalNode, _} | _]) -> + {trie_binding_count(FinalNode), trie_child_count(FinalNode)}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -232,19 +232,17 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_child_count(X, Node) -> +trie_child_count(Node) -> count(rabbit_topic_trie_edge, - #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, - node_id = Node, - _ = '_'}, + #topic_trie_edge{trie_edge = #trie_edge{node_id = Node, + _ = '_'}, _ = '_'}). -trie_binding_count(X, Node) -> +trie_binding_count(Node) -> count(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{exchange_name = X, - node_id = Node, - _ = '_'}, + trie_binding = #trie_binding{node_id = Node, + _ = '_'}, _ = '_'}). count(Table, Match) -> -- cgit v1.2.1 From 8cc78e11a85a15770725aa6808d84397abb79521 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 7 Mar 2011 14:32:43 +0000 Subject: document bq init params --- src/rabbit_backing_queue.erl | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index b06f1e9c..dfee2ee3 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -33,6 +33,18 @@ behaviour_info(callbacks) -> {stop, 0}, %% Initialise the backing queue and its state. + %% + %% Takes + %% 1. the queue name + %% 2. a boolean indicating whether the queue is durable + %% 3. a boolean indicating whether the queue is an existing queue + %% that should be recovered + %% 4. an asynchronous callback which can be invoked by the + %% backing queue when an event has occured that requires a + %% state transition. The callback accepts a function from + %% state to state. + %% 5. a synchronous callback. Same as the asynchronous callback + %% but waits for completion and returns 'error' on error. {init, 5}, %% Called on queue shutdown when queue isn't being deleted. -- cgit v1.2.1 From e20fb0e89b7f0c2875921318395c2fffc4dfd4ac Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 15:01:13 +0000 Subject: Added exchange name back into count functions. Made path node id key in delete accumulator --- src/rabbit_exchange_type_topic.erl | 58 ++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 7ff0808e..f9ac69ba 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -72,8 +72,9 @@ remove_bindings(true, X, Bs) -> lists:foldl( fun(B = #binding{destination = D}, {Acc, PathAcc}) -> Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(Path, - maybe_add_path(Path, PathAcc)), + PathAcc1 = decrement_bindings(X, Path, + maybe_add_path(X, Path, + PathAcc)), {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), @@ -85,38 +86,43 @@ remove_bindings(true, X, Bs) -> remove_bindings(false, _X, _Bs) -> ok. -maybe_add_path([{root, none}], PathAcc) -> +maybe_add_path(_X, [{root, none}], PathAcc) -> PathAcc; -maybe_add_path(Path, PathAcc) -> - case gb_trees:is_defined(Path, PathAcc) of +maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> + case gb_trees:is_defined(Node, PathAcc) of true -> PathAcc; - false -> gb_trees:insert(Path, counts(Path), PathAcc) + false -> gb_trees:insert(Node, path_entry(X, Path), PathAcc) end. 
-decrement_bindings(Path, PathAcc) -> - with_path_acc(fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, +decrement_bindings(X, Path, PathAcc) -> + with_path_acc(X, + fun({_Path, Bindings, Edges}) -> + {Path, Bindings - 1, Edges} + end, Path, PathAcc). -decrement_edges(Path, PathAcc) -> - with_path_acc(fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, +decrement_edges(X, Path, PathAcc) -> + with_path_acc(X, + fun({_Path, Bindings, Edges}) -> + {Path, Bindings, Edges - 1} + end, Path, PathAcc). -with_path_acc(_Fun, [{root, none}], PathAcc) -> +with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> PathAcc; -with_path_acc(Fun, Path, PathAcc) -> - NewVal = Fun(gb_trees:get(Path, PathAcc)), - NewPathAcc = gb_trees:update(Path, NewVal, PathAcc), +with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> + NewVal = Fun(gb_trees:get(Node, PathAcc)), + NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), case NewVal of {0, 0} -> - [_ | ParentPath] = Path, - decrement_edges(ParentPath, - maybe_add_path(ParentPath, NewPathAcc)); + decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); _ -> NewPathAcc end. -counts([{FinalNode, _} | _]) -> - {trie_binding_count(FinalNode), trie_child_count(FinalNode)}. +path_entry(X, Path = [{Node, _} | _]) -> + {Path, trie_binding_count(X, Node), trie_child_count(X, Node)}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). @@ -232,17 +238,19 @@ trie_binding_op(X, Node, D, Op) -> destination = D}}, write). -trie_child_count(Node) -> +trie_child_count(X, Node) -> count(rabbit_topic_trie_edge, - #topic_trie_edge{trie_edge = #trie_edge{node_id = Node, - _ = '_'}, + #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X, + node_id = Node, + _ = '_'}, _ = '_'}). -trie_binding_count(Node) -> +trie_binding_count(X, Node) -> count(rabbit_topic_trie_binding, #topic_trie_binding{ - trie_binding = #trie_binding{node_id = Node, - _ = '_'}, + trie_binding = #trie_binding{exchange_name = X, + node_id = Node, + _ = '_'}, _ = '_'}). count(Table, Match) -> -- cgit v1.2.1 From 4edad8a5317c97417b0340ce617b5150a069587a Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 15:12:17 +0000 Subject: Comment describing the why behind the refactoring of remove_bindings --- src/rabbit_exchange_type_topic.erl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index f9ac69ba..5c5d760e 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -68,6 +68,11 @@ add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, X, Bs) -> + %% The remove process is split into two distinct phases. In the + %% first phase, we first gather the lists of bindings and edges to + %% delete, then in the second phase we process all the + %% deletions. This is to prevent interleaving of read/write + %% operations in mnesia that can adversely affect performance. 
{ToDelete, Paths} = lists:foldl( fun(B = #binding{destination = D}, {Acc, PathAcc}) -> -- cgit v1.2.1 From 2219c3d554b1615a967acf62a288ac5813f2cb67 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 7 Mar 2011 15:27:54 +0000 Subject: guids be gone --- src/rabbit_amqqueue_process.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 21541541..b32fa0ff 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -401,11 +401,11 @@ deliver_from_queue_deliver(AckRequired, false, State) -> confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> {CMs, MTC1} = lists:foldl( - fun(Guid, {CMs, MTC0}) -> - case dict:find(Guid, MTC0) of + fun(MsgId, {CMs, MTC0}) -> + case dict:find(MsgId, MTC0) of {ok, {ChPid, MsgSeqNo}} -> {gb_trees_cons(ChPid, MsgSeqNo, CMs), - dict:erase(Guid, MTC0)}; + dict:erase(MsgId, MTC0)}; _ -> {CMs, MTC0} end -- cgit v1.2.1 From 765b4d8124fa44b8116211619d3cab9ecde2e106 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 15:53:17 +0000 Subject: Remove EnvVarUpdate --- packaging/windows-exe/lib/EnvVarUpdate.nsh | 327 ----------------------------- packaging/windows-exe/rabbitmq_nsi.in | 7 - 2 files changed, 334 deletions(-) delete mode 100644 packaging/windows-exe/lib/EnvVarUpdate.nsh diff --git a/packaging/windows-exe/lib/EnvVarUpdate.nsh b/packaging/windows-exe/lib/EnvVarUpdate.nsh deleted file mode 100644 index 839d6a02..00000000 --- a/packaging/windows-exe/lib/EnvVarUpdate.nsh +++ /dev/null @@ -1,327 +0,0 @@ -/** - * EnvVarUpdate.nsh - * : Environmental Variables: append, prepend, and remove entries - * - * WARNING: If you use StrFunc.nsh header then include it before this file - * with all required definitions. 
This is to avoid conflicts - * - * Usage: - * ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString" - * - * Credits: - * Version 1.0 - * * Cal Turney (turnec2) - * * Amir Szekely (KiCHiK) and e-circ for developing the forerunners of this - * function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar, - * WriteEnvStr, and un.DeleteEnvStr - * * Diego Pedroso (deguix) for StrTok - * * Kevin English (kenglish_hi) for StrContains - * * Hendri Adriaens (Smile2Me), Diego Pedroso (deguix), and Dan Fuhry - * (dandaman32) for StrReplace - * - * Version 1.1 (compatibility with StrFunc.nsh) - * * techtonik - * - * http://nsis.sourceforge.net/Environmental_Variables:_append%2C_prepend%2C_and_remove_entries - * - */ - - -!ifndef ENVVARUPDATE_FUNCTION -!define ENVVARUPDATE_FUNCTION -!verbose push -!verbose 3 -!include "LogicLib.nsh" -!include "WinMessages.NSH" -!include "StrFunc.nsh" - -; ---- Fix for conflict if StrFunc.nsh is already includes in main file ----------------------- -!macro _IncludeStrFunction StrFuncName - !ifndef ${StrFuncName}_INCLUDED - ${${StrFuncName}} - !endif - !ifndef Un${StrFuncName}_INCLUDED - ${Un${StrFuncName}} - !endif - !define un.${StrFuncName} "${Un${StrFuncName}}" -!macroend - -!insertmacro _IncludeStrFunction StrTok -!insertmacro _IncludeStrFunction StrStr -!insertmacro _IncludeStrFunction StrRep - -; ---------------------------------- Macro Definitions ---------------------------------------- -!macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString - Push "${EnvVarName}" - Push "${Action}" - Push "${RegLoc}" - Push "${PathString}" - Call EnvVarUpdate - Pop "${ResultVar}" -!macroend -!define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"' - -!macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString - Push "${EnvVarName}" - Push "${Action}" - Push "${RegLoc}" - Push "${PathString}" - Call un.EnvVarUpdate - Pop "${ResultVar}" -!macroend -!define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"' -; ---------------------------------- Macro Definitions end------------------------------------- - -;----------------------------------- EnvVarUpdate start---------------------------------------- -!define hklm_all_users 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' -!define hkcu_current_user 'HKCU "Environment"' - -!macro EnvVarUpdate UN - -Function ${UN}EnvVarUpdate - - Push $0 - Exch 4 - Exch $1 - Exch 3 - Exch $2 - Exch 2 - Exch $3 - Exch - Exch $4 - Push $5 - Push $6 - Push $7 - Push $8 - Push $9 - Push $R0 - - /* After this point: - ------------------------- - $0 = ResultVar (returned) - $1 = EnvVarName (input) - $2 = Action (input) - $3 = RegLoc (input) - $4 = PathString (input) - $5 = Orig EnvVar (read from registry) - $6 = Len of $0 (temp) - $7 = tempstr1 (temp) - $8 = Entry counter (temp) - $9 = tempstr2 (temp) - $R0 = tempChar (temp) */ - - ; Step 1: Read contents of EnvVarName from RegLoc - ; - ; Check for empty EnvVarName - ${If} $1 == "" - SetErrors - DetailPrint "ERROR: EnvVarName is blank" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Check for valid Action - ${If} $2 != "A" - ${AndIf} $2 != "P" - ${AndIf} $2 != "R" - SetErrors - DetailPrint "ERROR: Invalid Action - must be A, P, or R" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ${If} $3 == HKLM - ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5 - ${ElseIf} $3 == HKCU - ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5 - ${Else} - 
SetErrors - DetailPrint 'ERROR: Action is [$3] but must be "HKLM" or HKCU"' - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Check for empty PathString - ${If} $4 == "" - SetErrors - DetailPrint "ERROR: PathString is blank" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Make sure we've got some work to do - ${If} $5 == "" - ${AndIf} $2 == "R" - SetErrors - DetailPrint "$1 is empty - Nothing to remove" - Goto EnvVarUpdate_Restore_Vars - ${EndIf} - - ; Step 2: Scrub EnvVar - ; - StrCpy $0 $5 ; Copy the contents to $0 - ; Remove spaces around semicolons (NOTE: spaces before the 1st entry or - ; after the last one are not removed here but instead in Step 3) - ${If} $0 != "" ; If EnvVar is not empty ... - ${Do} - ${${UN}StrStr} $7 $0 " ;" - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 " ;" ";" ; Remove ';' - ${Loop} - ${Do} - ${${UN}StrStr} $7 $0 "; " - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 "; " ";" ; Remove ';' - ${Loop} - ${Do} - ${${UN}StrStr} $7 $0 ";;" - ${If} $7 == "" - ${ExitDo} - ${EndIf} - ${${UN}StrRep} $0 $0 ";;" ";" - ${Loop} - - ; Remove a leading or trailing semicolon from EnvVar - StrCpy $7 $0 1 0 - ${If} $7 == ";" - StrCpy $0 $0 "" 1 ; Change ';' to '' - ${EndIf} - StrLen $6 $0 - IntOp $6 $6 - 1 - StrCpy $7 $0 1 $6 - ${If} $7 == ";" - StrCpy $0 $0 $6 ; Change ';' to '' - ${EndIf} - ; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug - ${EndIf} - - /* Step 3. Remove all instances of the target path/string (even if "A" or "P") - $6 = bool flag (1 = found and removed PathString) - $7 = a string (e.g. path) delimited by semicolon(s) - $8 = entry counter starting at 0 - $9 = copy of $0 - $R0 = tempChar */ - - ${If} $5 != "" ; If EnvVar is not empty ... - StrCpy $9 $0 - StrCpy $0 "" - StrCpy $8 0 - StrCpy $6 0 - - ${Do} - ${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter - - ${If} $7 == "" ; If we've run out of entries, - ${ExitDo} ; were done - ${EndIf} ; - - ; Remove leading and trailing spaces from this entry (critical step for Action=Remove) - ${Do} - StrCpy $R0 $7 1 - ${If} $R0 != " " - ${ExitDo} - ${EndIf} - StrCpy $7 $7 "" 1 ; Remove leading space - ${Loop} - ${Do} - StrCpy $R0 $7 1 -1 - ${If} $R0 != " " - ${ExitDo} - ${EndIf} - StrCpy $7 $7 -1 ; Remove trailing space - ${Loop} - ${If} $7 == $4 ; If string matches, remove it by not appending it - StrCpy $6 1 ; Set 'found' flag - ${ElseIf} $7 != $4 ; If string does NOT match - ${AndIf} $0 == "" ; and the 1st string being added to $0, - StrCpy $0 $7 ; copy it to $0 without a prepended semicolon - ${ElseIf} $7 != $4 ; If string does NOT match - ${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0, - StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon - ${EndIf} ; - - IntOp $8 $8 + 1 ; Bump counter - ${Loop} ; Check for duplicates until we run out of paths - ${EndIf} - - ; Step 4: Perform the requested Action - ; - ${If} $2 != "R" ; If Append or Prepend - ${If} $6 == 1 ; And if we found the target - DetailPrint "Target is already present in $1. 
It will be removed and" - ${EndIf} - ${If} $0 == "" ; If EnvVar is (now) empty - StrCpy $0 $4 ; just copy PathString to EnvVar - ${If} $6 == 0 ; If found flag is either 0 - ${OrIf} $6 == "" ; or blank (if EnvVarName is empty) - DetailPrint "$1 was empty and has been updated with the target" - ${EndIf} - ${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty), - StrCpy $0 $0;$4 ; append PathString - ${If} $6 == 1 - DetailPrint "appended to $1" - ${Else} - DetailPrint "Target was appended to $1" - ${EndIf} - ${Else} ; If Prepend (and EnvVar is not empty), - StrCpy $0 $4;$0 ; prepend PathString - ${If} $6 == 1 - DetailPrint "prepended to $1" - ${Else} - DetailPrint "Target was prepended to $1" - ${EndIf} - ${EndIf} - ${Else} ; If Action = Remove - ${If} $6 == 1 ; and we found the target - DetailPrint "Target was found and removed from $1" - ${Else} - DetailPrint "Target was NOT found in $1 (nothing to remove)" - ${EndIf} - ${If} $0 == "" - DetailPrint "$1 is now empty" - ${EndIf} - ${EndIf} - - ; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change - ; - ClearErrors - ${If} $3 == HKLM - WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section - ${ElseIf} $3 == HKCU - WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section - ${EndIf} - - IfErrors 0 +4 - MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3" - DetailPrint "Could not write updated $1 to $3" - Goto EnvVarUpdate_Restore_Vars - - ; "Export" our change - SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 - - EnvVarUpdate_Restore_Vars: - ; - ; Restore the user's variables and return ResultVar - Pop $R0 - Pop $9 - Pop $8 - Pop $7 - Pop $6 - Pop $5 - Pop $4 - Pop $3 - Pop $2 - Pop $1 - Push $0 ; Push my $0 (ResultVar) - Exch - Pop $0 ; Restore his $0 - -FunctionEnd - -!macroend ; EnvVarUpdate UN -!insertmacro EnvVarUpdate "" -!insertmacro EnvVarUpdate "un." 
-;----------------------------------- EnvVarUpdate end---------------------------------------- - -!verbose pop -!endif diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index 6d79ffd4..3da8f4d2 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -4,7 +4,6 @@ !include WinMessages.nsh !include FileFunc.nsh !include WordFunc.nsh -!include lib\EnvVarUpdate.nsh !define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' !define uninstall "Software\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ" @@ -77,9 +76,6 @@ Section "RabbitMQ Server (required)" Rabbit File /r "rabbitmq_server-%%VERSION%%" File "rabbitmq.ico" - ; Add to PATH - ${EnvVarUpdate} $0 "PATH" "A" "HKLM" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" - ; Write the installation path into the registry WriteRegStr HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server" "Install_Dir" "$INSTDIR" @@ -157,9 +153,6 @@ Section "Uninstall" ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" stop' ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" remove' - ; Remove from PATH - ${un.EnvVarUpdate} $0 "PATH" "R" "HKLM" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" - ; Remove files and uninstaller RMDir /r "$INSTDIR\rabbitmq_server-%%VERSION%%" Delete "$INSTDIR\rabbitmq.ico" -- cgit v1.2.1 From d0c3b429118272bade0b25ef690402af474f9fc8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 16:25:36 +0000 Subject: Add a command prompt shortcut that starts up in the right dir. That's pretty lame, but at least it's safe. --- packaging/windows-exe/rabbitmq_nsi.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index 3da8f4d2..1ed4064e 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -122,6 +122,9 @@ Section "Start Menu" RabbitStartMenu CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Start Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "start" "$INSTDIR\rabbitmq.ico" CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Stop Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "stop" "$INSTDIR\rabbitmq.ico" + SetOutPath "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Command Prompt (sbin dir).lnk" "$WINDIR\system32\cmd.exe" "" "$WINDIR\system32\cmd.exe" + SetOutPath $INSTDIR SectionEnd ;-------------------------------- -- cgit v1.2.1 From 0d876f0dd1a5e4db1d8a441f53dac2f7aa4e6578 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 7 Mar 2011 16:57:21 +0000 Subject: Fixed a few pattern matching errors - remove actually works again --- src/rabbit_exchange_type_topic.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 5c5d760e..ff4828c1 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -83,9 +83,10 @@ remove_bindings(true, X, Bs) -> {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), + io:format("~p~n", [Paths]), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {[{Node, W}, {Parent, _} | _ ], {0, 0}} + {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> @@ -119,7 +120,7 @@ 
with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewVal = Fun(gb_trees:get(Node, PathAcc)), NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), case NewVal of - {0, 0} -> + {_, 0, 0} -> decrement_edges(X, ParentPath, maybe_add_path(X, ParentPath, NewPathAcc)); _ -> -- cgit v1.2.1 From 13bbf692083e6ab07f771b797333f695dc18db32 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:00:11 +0000 Subject: Explain the tuple here. --- src/rabbit_upgrade.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f1f0d6d3..dd253468 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -242,6 +242,8 @@ read_version() -> case rabbit_misc:read_term_file(schema_filename()) of {ok, [V]} -> case is_new_version(V) of false -> {ok, convert_old_version(V)}; + %% Write in this format for future expansion; + %% we want to allow plugins to own upgrades. true -> [{rabbit, RV}] = V, {ok, RV} end; -- cgit v1.2.1 From 4d806324368f812c4831724f868079f3a2835892 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:06:53 +0000 Subject: Variety of small QA-related tweaks. --- src/rabbit.erl | 2 +- src/rabbit_mnesia.erl | 7 ++++--- src/rabbit_upgrade.erl | 20 ++++++++++---------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 21c1452f..e3288eaf 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -204,7 +204,7 @@ start() -> end. stop() -> - rabbit_mnesia:record_running_disc_nodes(), + ok = rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 33e8764c..30083cc0 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -387,7 +387,8 @@ record_running_disc_nodes() -> sets:from_list(running_clustered_nodes()))) -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. - rabbit_misc:write_term_file(FileName, [Nodes]). + rabbit_misc:write_term_file(FileName, [Nodes]), + ok. read_previous_run_disc_nodes() -> FileName = running_nodes_filename(), @@ -433,7 +434,7 @@ init_db(ClusterNodes, Force) -> ok = create_schema(); {[], true} -> %% We're the first node up - case rabbit_upgrade:maybe_upgrade(local) of + case rabbit_upgrade:maybe_upgrade_local() of ok -> ensure_schema_integrity(); version_not_available -> schema_ok_or_move() end; @@ -449,7 +450,7 @@ init_db(ClusterNodes, Force) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade(local) of + case rabbit_upgrade:maybe_upgrade_local() of ok -> ok; %% If we're just starting up a new node we won't have diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index dd253468..e466eb87 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,7 +16,7 @@ -module(rabbit_upgrade). --export([maybe_upgrade_mnesia/0, maybe_upgrade/1]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). -export([read_version/0, write_version/0, desired_version/0, desired_version/1]). @@ -35,7 +35,7 @@ -type(scope() :: 'mnesia' | 'local'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). --spec(maybe_upgrade/1 :: (scope()) -> 'ok' | 'version_not_available'). +-spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). -spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write_version/0 :: () -> 'ok'). -spec(desired_version/0 :: () -> version()). 
@@ -128,7 +128,7 @@ upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), - case {am_i_disc_node(), AfterUs} of + case {is_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -169,7 +169,7 @@ upgrade_mode(AllNodes) -> end end. -am_i_disc_node() -> +is_disc_node() -> %% This is pretty ugly but we can't start Mnesia and ask it (will hang), %% we can't look at the config file (may not include us even if we're a %% disc node). @@ -210,13 +210,13 @@ secondary_upgrade(AllNodes) -> %% Note that we cluster with all nodes, rather than all disc nodes %% (as we can't know all disc nodes at this point). This is safe as %% we're not writing the cluster config, just setting up Mnesia. - ClusterNodes = case am_i_disc_node() of + ClusterNodes = case is_disc_node() of true -> AllNodes; false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - rabbit_mnesia:init_db(ClusterNodes, true), - write_version(mnesia), + ok = rabbit_mnesia:init_db(ClusterNodes, true), + ok = write_version(mnesia), ok. nodes_running(Nodes) -> @@ -230,11 +230,11 @@ node_running(Node) -> %% ------------------------------------------------------------------- -maybe_upgrade(Scope) -> - case upgrades_required(Scope) of +maybe_upgrade_local() -> + case upgrades_required(local) of version_not_available -> version_not_available; [] -> ok; - Upgrades -> apply_upgrades(Scope, Upgrades, + Upgrades -> apply_upgrades(local, Upgrades, fun() -> ok end) end. -- cgit v1.2.1 From 165c1d3f25a44c91650556a68ba725239f1f8d12 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:21:36 +0000 Subject: Spec, rename functions. --- src/rabbit_mnesia.erl | 13 +++++++------ src/rabbit_upgrade.erl | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 30083cc0..eb92e9fe 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -22,8 +22,8 @@ is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, - record_running_disc_nodes/0, read_previous_run_disc_nodes/0, - delete_previous_run_disc_nodes/0, running_nodes_filename/0]). + record_running_disc_nodes/0, read_previously_running_disc_nodes/0, + delete_previously_running_disc_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -45,6 +45,7 @@ -spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). +-spec(init_db/2 :: ([node()], boolean()) -> 'ok'). -spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). @@ -61,8 +62,8 @@ -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). -spec(record_running_disc_nodes/0 :: () -> 'ok'). --spec(read_previous_run_disc_nodes/0 :: () -> [node()]). --spec(delete_previous_run_disc_nodes/0 :: () -> 'ok'). +-spec(read_previously_running_disc_nodes/0 :: () -> [node()]). +-spec(delete_previously_running_disc_nodes/0 :: () -> 'ok'). -spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -390,7 +391,7 @@ record_running_disc_nodes() -> rabbit_misc:write_term_file(FileName, [Nodes]), ok. 
-read_previous_run_disc_nodes() -> +read_previously_running_disc_nodes() -> FileName = running_nodes_filename(), case rabbit_misc:read_term_file(FileName) of {ok, [Nodes]} -> Nodes; @@ -399,7 +400,7 @@ read_previous_run_disc_nodes() -> FileName, Reason}}) end. -delete_previous_run_disc_nodes() -> +delete_previously_running_disc_nodes() -> FileName = running_nodes_filename(), case file:delete(FileName) of ok -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index e466eb87..0a821878 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -122,12 +122,12 @@ maybe_upgrade_mnesia() -> secondary -> secondary_upgrade(AllNodes) end end, - ok = rabbit_mnesia:delete_previous_run_disc_nodes(). + ok = rabbit_mnesia:delete_previously_running_disc_nodes(). upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> - AfterUs = rabbit_mnesia:read_previous_run_disc_nodes(), + AfterUs = rabbit_mnesia:read_previously_running_disc_nodes(), case {is_disc_node(), AfterUs} of {true, []} -> primary; -- cgit v1.2.1 From f6d550f49e9e90a551ecd20e80d405068db7d781 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 7 Mar 2011 18:23:56 +0000 Subject: Simpler is_new_version/1. --- src/rabbit_upgrade.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0a821878..f59dbdfe 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -401,7 +401,8 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). info(Msg, Args) -> error_logger:info_msg(Msg, Args). is_new_version(Version) -> - is_list(Version) andalso - length(Version) > 0 andalso - lists:all(fun(Item) -> is_tuple(Item) andalso size(Item) == 2 end, - Version). + try + orddict:size(Version) > 0 + catch error:badarg -> + false + end. -- cgit v1.2.1 From 32bcf5f5f54f8a57dc8b9b27966aebc679dee7d6 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 10:05:11 +0000 Subject: Removed io:format --- src/rabbit_exchange_type_topic.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ff4828c1..7cff129c 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -83,7 +83,6 @@ remove_bindings(true, X, Bs) -> {[{FinalNode, D} | Acc], PathAcc1} end, {[], gb_trees:empty()}, Bs), - io:format("~p~n", [Paths]), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} -- cgit v1.2.1 From 21343de94bc744bc430bbc2b72d9526c284432a6 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:11:57 +0000 Subject: Use system_info(check_io)/max_fds instead of ulimit. --- src/file_handle_cache.erl | 39 ++++++++++----------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 6f8241b3..de602d39 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -156,13 +156,6 @@ -define(SERVER, ?MODULE). -define(RESERVED_FOR_OTHERS, 100). -%% Googling around suggests that Windows has a limit somewhere around -%% 16M, eg -%% http://blogs.technet.com/markrussinovich/archive/2009/09/29/3283844.aspx -%% however, it turns out that's only available through the win32 -%% API. Via the C Runtime, we have just 512: -%% http://msdn.microsoft.com/en-us/library/6e3b887c%28VS.80%29.aspx --define(FILE_HANDLES_LIMIT_WINDOWS, 512). 
-define(FILE_HANDLES_LIMIT_OTHER, 1024). -define(FILE_HANDLES_CHECK_INTERVAL, 2000). @@ -1185,29 +1178,17 @@ track_client(Pid, Clients) -> false -> ok end. -%% For all unices, assume ulimit exists. Further googling suggests -%% that BSDs (incl OS X), solaris and linux all agree that ulimit -n -%% is file handles + ulimit() -> - case os:type() of - {win32, _OsName} -> - ?FILE_HANDLES_LIMIT_WINDOWS; - {unix, _OsName} -> - %% Under Linux, Solaris and FreeBSD, ulimit is a shell - %% builtin, not a command. In OS X and AIX it's a command. - %% Fortunately, os:cmd invokes the cmd in a shell env, so - %% we're safe in all cases. - case os:cmd("ulimit -n") of - "unlimited" -> - infinity; - String = [C|_] when $0 =< C andalso C =< $9 -> - list_to_integer( - lists:takewhile( - fun (D) -> $0 =< D andalso D =< $9 end, String)); - _ -> - %% probably a variant of - %% "/bin/sh: line 1: ulimit: command not found\n" - unknown + case proplists:get_value(max_fds, erlang:system_info(check_io)) of + MaxFds when is_integer(MaxFds) andalso MaxFds > 1 -> + case os:type() of + {win32, _OsName} -> + %% On Windows max_fds is twice the number of open files: + %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 + MaxFds / 2; + _Any -> + MaxFds end; _ -> unknown -- cgit v1.2.1 From 788d800206742fd0630dc6cfb5d1a2e533c2f416 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:16:35 +0000 Subject: Short comment --- src/file_handle_cache.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index de602d39..f27adfd9 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1179,6 +1179,8 @@ track_client(Pid, Clients) -> end. +%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS +%% environment variable, on Linux set `ulimit -n`. ulimit() -> case proplists:get_value(max_fds, erlang:system_info(check_io)) of MaxFds when is_integer(MaxFds) andalso MaxFds > 1 -> @@ -1188,6 +1190,7 @@ ulimit() -> %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 MaxFds / 2; _Any -> + %% For other operating systems trust Erlang. MaxFds end; _ -> -- cgit v1.2.1 From 4dfbd8dc307d9fe76b7df03bfc101a55faa09837 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 8 Mar 2011 11:58:10 +0000 Subject: Floats are bad. Use integer division instead. (via Matthias) --- src/file_handle_cache.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index f27adfd9..304f9335 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1188,7 +1188,7 @@ ulimit() -> {win32, _OsName} -> %% On Windows max_fds is twice the number of open files: %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 - MaxFds / 2; + MaxFds div 2; _Any -> %% For other operating systems trust Erlang. 
MaxFds -- cgit v1.2.1 From 5592ce2dbf48b7d3171e875c775667d4dd9f8fa3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:01:14 +0000 Subject: add BQ:validate_message --- include/rabbit_backing_queue_spec.hrl | 2 + src/rabbit_amqqueue_process.erl | 72 ++++++++++++++++++++++------------- src/rabbit_backing_queue.erl | 5 ++- src/rabbit_mirror_queue_master.erl | 5 +++ src/rabbit_mirror_queue_slave.erl | 13 ++++++- src/rabbit_variable_queue.erl | 4 +- 6 files changed, 71 insertions(+), 30 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 9f4f76ca..4ef13cb3 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -68,3 +68,5 @@ -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((A) -> A), state()) -> {[rabbit_guid:guid()], state()}). +-spec(validate_message/2 :: + (rabbit_types:basic_message(), state()) -> boolean()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 8c19aa16..0c9eba9d 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -487,46 +487,64 @@ attempt_delivery(#delivery{txn = none, sender = ChPid, message = Message, msg_seq_no = MsgSeqNo}, - {NeedsConfirming, State = #q{backing_queue = BQ}}) -> + {NeedsConfirming, State = #q{backing_queue = BQ, + backing_queue_state = BQS}}) -> %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming case {NeedsConfirming, MsgSeqNo} of {_, undefined} -> ok; {no_confirm, _} -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); {confirm, _} -> ok end, - PredFun = fun (IsEmpty, _State) -> not IsEmpty end, - DeliverFun = - fun (AckRequired, false, State1 = #q{backing_queue_state = BQS}) -> - %% we don't need an expiry here because messages are - %% not being enqueued, so we use an empty - %% message_properties. - {AckTag, BQS1} = - BQ:publish_delivered( - AckRequired, Message, - (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = (NeedsConfirming =:= confirm)}, - ChPid, BQS), - {{Message, false, AckTag}, true, - State1#q{backing_queue_state = BQS1}} - end, - {Delivered, State1} = - deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), - {Delivered, NeedsConfirming, State1}; + case BQ:validate_message(Message, BQS) of + {invalid, _Bool} = Invalid -> + {Invalid, NeedsConfirming, State}; + valid -> + PredFun = fun (IsEmpty, _State) -> not IsEmpty end, + DeliverFun = + fun (AckRequired, false, + State1 = #q{backing_queue_state = BQS1}) -> + %% we don't need an expiry here because + %% messages are not being enqueued, so we use + %% an empty message_properties. + {AckTag, BQS2} = + BQ:publish_delivered( + AckRequired, Message, + (?BASE_MESSAGE_PROPERTIES)#message_properties{ + needs_confirming = + (NeedsConfirming =:= confirm)}, + ChPid, BQS1), + {{Message, false, AckTag}, true, + State1#q{backing_queue_state = BQS2}} + end, + {Delivered, State1} = + deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, + State), + {{valid, Delivered}, NeedsConfirming, State1} + end; attempt_delivery(#delivery{txn = Txn, sender = ChPid, message = Message}, {NeedsConfirming, State = #q{backing_queue = BQ, backing_queue_state = BQS}}) -> - store_ch_record((ch_record(ChPid))#cr{txn = Txn}), - BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, BQS), - {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}. 
+ case BQ:validate_message(Message, BQS) of + {invalid, _Reason} = Invalid -> + {Invalid, NeedsConfirming, State}; + valid -> + store_ch_record((ch_record(ChPid))#cr{txn = Txn}), + BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, + BQS), + {{valid, true}, NeedsConfirming, + State#q{backing_queue_state = BQS1}} + end. deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of - {true, _, State1} -> + {{invalid, seen}, _, State1} -> + {true, State1}; + {{valid, true}, _, State1} -> {true, State1}; - {false, NeedsConfirming, State1 = #q{backing_queue = BQ, - backing_queue_state = BQS}} -> + {{valid, false}, NeedsConfirming, + State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ @@ -863,9 +881,9 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Delivered, _NeedsConfirming, State1} = + {{_Valid, Bool}, _NeedsConfirming, State1} = attempt_delivery(Delivery, record_confirm_message(Delivery, State)), - reply(Delivered, State1); + reply(Bool, State1); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index d42fe140..726b9bef 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -127,7 +127,10 @@ behaviour_info(callbacks) -> %% Passed a function to be invoked with the relevant backing %% queue's state. Useful for when the backing queue or other %% components need to pass functions into the backing queue. - {invoke, 3} + {invoke, 3}, + + %% TODO: document me + {validate_message, 2} ]; behaviour_info(_Other) -> undefined. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 800d9453..513a8bb5 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -280,3 +280,8 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> {MsgIds, BQS1} = BQ:invoke(Mod, Fun, BQS), {MsgIds, State #state { backing_queue_state = BQS1 }}. + +validate_message(Message, #state { backing_queue = BQ, + backing_queue_state = BSQ }) -> + %% this will definitely change. + BQ:validate_message(Message, BQS). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index d9ad7120..0aedff59 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -119,6 +119,16 @@ init([#amqqueue { name = QueueName } = Q]) -> handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> %% Synchronous, "immediate" delivery mode + %% + %% TODO: we cannot reply here because we may not have received + %% this from gm, and indeed the master might die before it + %% receives it. Thus if we are promoted to master at that point + %% then we must reply appropriately. So we're going to have to + %% enqueue it, record that it needs a reply, and then reply either + %% when we get the nod via gm, or, if we're promoted, in the mean + %% time we'll have to figure out something else... Of course, if + %% we've already seen it from gm then we're going to have to reply + %% now. 
gen_server2:reply(From, false), %% master may deliver it, not us noreply(maybe_enqueue_message(Delivery, State)); @@ -419,7 +429,7 @@ maybe_enqueue_message( msg_seq_no = MsgSeqNo, sender = ChPid }, State = #state { sender_queues = SQ, - msg_id_status = MS }) -> + msg_id_status = MS }) -> %% We will never see {published, ChPid, MsgSeqNo} here. case dict:find(MsgId, MS) of error -> @@ -506,6 +516,7 @@ process_instruction( State1 = State #state { sender_queues = SQ1, msg_id_status = MS2 }, + %% we probably want to work in BQ:validate_message here {ok, case Deliver of false -> diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7019efbb..4ad46f1a 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, multiple_routing_keys/0]). + status/1, invoke/3, validate_message/2, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -868,6 +868,8 @@ status(#vqstate { invoke(?MODULE, Fun, State) -> Fun(State). +validate_message(_Msg, _State) -> true. + %%---------------------------------------------------------------------------- %% Minor helpers %%---------------------------------------------------------------------------- -- cgit v1.2.1 From c8edb5712e80bcf0829ee03ceccb56fac898e4f6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:13:24 +0000 Subject: oops --- include/rabbit_backing_queue_spec.hrl | 4 ++-- src/rabbit_variable_queue.erl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 4ef13cb3..8f010e58 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -68,5 +68,5 @@ -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((A) -> A), state()) -> {[rabbit_guid:guid()], state()}). --spec(validate_message/2 :: - (rabbit_types:basic_message(), state()) -> boolean()). +-spec(validate_message/2 :: (rabbit_types:basic_message(), state()) -> + {'valid'|'invalid', boolean()}). diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 4ad46f1a..a3f397cc 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -868,7 +868,7 @@ status(#vqstate { invoke(?MODULE, Fun, State) -> Fun(State). -validate_message(_Msg, _State) -> true. +validate_message(_Msg, _State) -> valid. %%---------------------------------------------------------------------------- %% Minor helpers -- cgit v1.2.1 From 4debf471dc5ba63dfb7ad50db24835844eb61939 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:24:18 +0000 Subject: Hmm, I'd forgotten than deliver_immediately == false => the msg _isn't_ enqueued --- src/rabbit_mirror_queue_slave.erl | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 0aedff59..e17eef01 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -119,16 +119,15 @@ init([#amqqueue { name = QueueName } = Q]) -> handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> %% Synchronous, "immediate" delivery mode - %% - %% TODO: we cannot reply here because we may not have received - %% this from gm, and indeed the master might die before it - %% receives it. 
Thus if we are promoted to master at that point - %% then we must reply appropriately. So we're going to have to - %% enqueue it, record that it needs a reply, and then reply either - %% when we get the nod via gm, or, if we're promoted, in the mean - %% time we'll have to figure out something else... Of course, if - %% we've already seen it from gm then we're going to have to reply - %% now. + + %% It is safe to reply 'false' here even if a) we've not seen the + %% msg via gm, or b) the master dies before we receive the msg via + %% gm. In the case of (a), we will eventually receive the msg via + %% gm, and it's only the master's result to the channel that is + %% important. In the case of (b), if the master does die and we do + %% get promoted then at that point we have no consumers, thus + %% 'false' is precisely the correct answer. However, we must be + %% careful to _not_ enqueue the message in this case. gen_server2:reply(From, false), %% master may deliver it, not us noreply(maybe_enqueue_message(Delivery, State)); -- cgit v1.2.1 From b926ae697507a7f61801613107bd90a8e1b226a9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:33:02 +0000 Subject: save a line --- src/rabbit_amqqueue_process.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index b32fa0ff..6c4c8654 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -422,7 +422,7 @@ gb_trees_cons(Key, Value, Tree) -> end. record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> - {no_confirm, State}; + {never, State}; record_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { @@ -431,10 +431,10 @@ record_confirm_message(#delivery{sender = ChPid, State = #q{msg_id_to_channel = MTC, q = #amqqueue{durable = true}}) -> - {confirm, + {eventually, State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> - {no_confirm, State}. + {immediately, State}. run_message_queue(State) -> Funs = {fun deliver_from_queue_pred/2, @@ -451,10 +451,9 @@ attempt_delivery(#delivery{txn = none, msg_seq_no = MsgSeqNo}, {NeedsConfirming, State = #q{backing_queue = BQ}}) -> %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming - case {NeedsConfirming, MsgSeqNo} of - {_, undefined} -> ok; - {no_confirm, _} -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); - {confirm, _} -> ok + case NeedsConfirming of + immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); + _ -> ok end, PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -466,7 +465,7 @@ attempt_delivery(#delivery{txn = none, BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = (NeedsConfirming =:= confirm)}, + needs_confirming = (NeedsConfirming =:= eventually)}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} @@ -493,7 +492,7 @@ deliver_or_enqueue(Delivery, State) -> BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ needs_confirming = - (NeedsConfirming =:= confirm)}, + (NeedsConfirming =:= eventually)}, BQS), {false, ensure_ttl_timer(State1#q{backing_queue_state = BQS1})} end. -- cgit v1.2.1 From 2115f9d744d058f93556d43c21eb1a66fcdea847 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 13:35:06 +0000 Subject: save another line. 
bonus --- src/rabbit_amqqueue_process.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6c4c8654..cfef08a5 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -428,9 +428,8 @@ record_confirm_message(#delivery{sender = ChPid, message = #basic_message { is_persistent = true, id = MsgId}}, - State = - #q{msg_id_to_channel = MTC, - q = #amqqueue{durable = true}}) -> + State = #q{q = #amqqueue{durable = true}, + msg_id_to_channel = MTC}) -> {eventually, State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; record_confirm_message(_Delivery, State) -> -- cgit v1.2.1 From 41ce63c6dab98f8ce0b80bfcd52d86e1a53ef23d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 14:11:11 +0000 Subject: Don't change the version file format --- src/rabbit_upgrade.erl | 49 ++++++++++++++++++------------------------------- 1 file changed, 18 insertions(+), 31 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f59dbdfe..8113bad8 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -240,50 +240,44 @@ maybe_upgrade_local() -> read_version() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> case is_new_version(V) of - false -> {ok, convert_old_version(V)}; - %% Write in this format for future expansion; - %% we want to allow plugins to own upgrades. - true -> [{rabbit, RV}] = V, - {ok, RV} - end; + {ok, [V]} -> {ok, V}; {error, _} = Err -> Err end. read_version(Scope) -> case read_version() of {error, _} = E -> E; - {ok, V} -> {ok, orddict:fetch(Scope, V)} + {ok, V} -> {ok, filter_by_scope(Scope, V)} end. write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), - [[{rabbit, desired_version()}]]), + ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), ok. write_version(Scope) -> {ok, V0} = read_version(), - V = orddict:store(Scope, desired_version(Scope), V0), - ok = rabbit_misc:write_term_file(schema_filename(), [[{rabbit, V}]]), + V = flatten([case S of + Scope -> desired_version(S); + _ -> filter_by_scope(S, V0) + end || S <- ?SCOPES]), + ok = rabbit_misc:write_term_file(schema_filename(), [V]), ok. desired_version() -> - lists:foldl( - fun (Scope, Acc) -> - orddict:store(Scope, desired_version(Scope), Acc) - end, - orddict:new(), ?SCOPES). + flatten([desired_version(Scope) || Scope <- ?SCOPES]). desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). -convert_old_version(Heads) -> - Locals = [add_queue_ttl], - V0 = orddict:new(), - V1 = orddict:store(mnesia, Heads -- Locals, V0), - orddict:store(local, - lists:filter(fun(H) -> lists:member(H, Locals) end, Heads), - V1). +flatten(LoL) -> + lists:sort(lists:flatten(LoL)). + +filter_by_scope(Scope, Versions) -> + with_upgrade_graph( + fun(G) -> + ScopeVs = digraph:vertices(G), + [V || V <- Versions, lists:member(V, ScopeVs)] + end, Scope). %% ------------------------------------------------------------------- @@ -399,10 +393,3 @@ lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). - -is_new_version(Version) -> - try - orddict:size(Version) > 0 - catch error:badarg -> - false - end. 
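%% A minimal editorial sketch (not part of the patch above): it illustrates
%% the shape of the schema_version term before and after this commit, and
%% how a flat version list can be filtered back down to one scope, in the
%% spirit of filter_by_scope/2. add_queue_ttl is the local upgrade named in
%% the code removed above; rework_topic_trie is a made-up stand-in for a
%% mnesia-scope upgrade, and the exact file contents may differ.
-module(version_format_sketch).
-export([old_term/0, new_term/0, local_only/1]).

old_term() ->
    %% pre-change file term: per-scope orddict wrapped in {rabbit, ...}
    [{rabbit, [{local, [add_queue_ttl]}, {mnesia, [rework_topic_trie]}]}].

new_term() ->
    %% post-change file term: one flat, sorted list of upgrade names
    lists:usort([add_queue_ttl, rework_topic_trie]).

local_only(Written) ->
    %% what filtering by the 'local' scope boils down to, with that scope's
    %% upgrade-graph vertices hard-coded for the sketch
    LocalVs = [add_queue_ttl],
    [V || V <- Written, lists:member(V, LocalVs)].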
-- cgit v1.2.1 From b610ff9f65d1289904f34a49a242fe843db094cc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 14:19:40 +0000 Subject: Use lists:append/1. --- src/rabbit_upgrade.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 8113bad8..1284d229 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -270,7 +270,7 @@ desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). flatten(LoL) -> - lists:sort(lists:flatten(LoL)). + lists:sort(lists:append(LoL)). filter_by_scope(Scope, Versions) -> with_upgrade_graph( -- cgit v1.2.1 From a0da6633d6c511267d14cdc79771dfdc73e5d17f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 8 Mar 2011 15:44:58 +0000 Subject: Just some more notes --- src/rabbit_mirror_queue_slave.erl | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index e17eef01..87ce31d8 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -359,7 +359,18 @@ promote_me(From, #state { q = Q, %% publish stuff by sending it to ourself - we must pass it %% through to this init, otherwise we can violate ordering %% constraints. - GTC = dict:from_list( + + %% MTC should contain only entries for which we are still + %% expecting confirms to come back to use from the underlying BQ. + + %% TODO: what do we do with entries in MS that are 'confirmed' + %% already? Well they should end up in the master queue's state, + %% and the confirms should be issued either by the + %% amqqueue_process if 'immediately', or otherwise by the master + %% queue on validate_message?! That's disgusting. There's no way + %% validate_message should be side-effecting... though we could at + %% least ensure it's idempotent. Hmm. + MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], @@ -367,7 +378,7 @@ promote_me(From, #state { q = Q, || {_ChPid, PubQ} <- dict:to_list(SQ)]), QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( Q, rabbit_mirror_queue_master, MasterState, RateTRef, - AckTags, Deliveries, GTC), + AckTags, Deliveries, MTC), {become, rabbit_amqqueue_process, QueueState, hibernate}. noreply(State) -> -- cgit v1.2.1 From 72fabd498be215a5a95927a5448d079bdfb4fe05 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 15:56:23 +0000 Subject: Tweaked accumulator again - no need to store the entire path --- src/rabbit_exchange_type_topic.erl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 7cff129c..65518287 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -85,7 +85,7 @@ remove_bindings(true, X, Bs) -> [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {Node, {[{Node, W}, {Parent, _} | _], 0, 0}} + {Node, {Parent, W, {0, 0}}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> @@ -101,25 +101,26 @@ maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> decrement_bindings(X, Path, PathAcc) -> with_path_acc(X, - fun({_Path, Bindings, Edges}) -> - {Path, Bindings - 1, Edges} + fun({Bindings, Edges}) -> + {Bindings - 1, Edges} end, Path, PathAcc). 
decrement_edges(X, Path, PathAcc) -> with_path_acc(X, - fun({_Path, Bindings, Edges}) -> - {Path, Bindings, Edges - 1} + fun({Bindings, Edges}) -> + {Bindings, Edges - 1} end, Path, PathAcc). with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> PathAcc; with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> - NewVal = Fun(gb_trees:get(Node, PathAcc)), - NewPathAcc = gb_trees:update(Node, NewVal, PathAcc), - case NewVal of - {_, 0, 0} -> + {Parent, W, Counts} = gb_trees:get(Node, PathAcc), + NewCounts = Fun(Counts), + NewPathAcc = gb_trees:update(Node, {Parent, W, NewCounts}, PathAcc), + case NewCounts of + {0, 0} -> decrement_edges(X, ParentPath, maybe_add_path(X, ParentPath, NewPathAcc)); _ -> -- cgit v1.2.1 From 291ae6f85aa36cb2dc14f6f864509b82502c71a1 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Tue, 8 Mar 2011 16:18:22 +0000 Subject: Fix bad path_entry creation --- src/rabbit_exchange_type_topic.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 65518287..ffab0fcb 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -127,8 +127,8 @@ with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewPathAcc end. -path_entry(X, Path = [{Node, _} | _]) -> - {Path, trie_binding_count(X, Node), trie_child_count(X, Node)}. +path_entry(X, Path = [{Node, W}, {Parent, _} | _]) -> + {Parent, W, {trie_binding_count(X, Node), trie_child_count(X, Node)}}. binding_path(#binding{source = X, key = K}) -> follow_down_get_path(X, split_topic_key(K)). -- cgit v1.2.1 From 39db9d66cc5ec7390df89fd815939dd96d0ae903 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 8 Mar 2011 16:25:43 +0000 Subject: Pin the URL comment. --- src/file_handle_cache.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index 304f9335..b26bb988 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1187,7 +1187,7 @@ ulimit() -> case os:type() of {win32, _OsName} -> %% On Windows max_fds is twice the number of open files: - %% https://github.com/yrashk/erlang/blob/master/erts/emulator/sys/win32/sys.c#L2463-2466 + %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466 MaxFds div 2; _Any -> %% For other operating systems trust Erlang. -- cgit v1.2.1 From b52a76575e3e7e875a8d9cababfb9247490f716c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 8 Mar 2011 22:20:43 +0000 Subject: cosmetic changes and some inlining --- src/rabbit_exchange_type_topic.erl | 45 ++++++++++++++------------------------ 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffab0fcb..a7d36533 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -69,48 +69,42 @@ add_binding(false, _Exchange, _Binding) -> remove_bindings(true, X, Bs) -> %% The remove process is split into two distinct phases. In the - %% first phase, we first gather the lists of bindings and edges to + %% first phase we gather the lists of bindings and edges to %% delete, then in the second phase we process all the %% deletions. This is to prevent interleaving of read/write %% operations in mnesia that can adversely affect performance. 
{ToDelete, Paths} = lists:foldl( - fun(B = #binding{destination = D}, {Acc, PathAcc}) -> - Path = [{FinalNode, _} | _] = binding_path(B), - PathAcc1 = decrement_bindings(X, Path, - maybe_add_path(X, Path, - PathAcc)), - {[{FinalNode, D} | Acc], PathAcc1} + fun(#binding{source = S, key = K, destination = D}, {Acc, PathAcc}) -> + Path = [{FinalNode, _} | _] = + follow_down_get_path(S, split_topic_key(K)), + {[{FinalNode, D} | Acc], + decrement_bindings(X, Path, maybe_add_path(X, Path, PathAcc))} end, {[], gb_trees:empty()}, Bs), [trie_remove_binding(X, FinalNode, D) || {FinalNode, D} <- ToDelete], [trie_remove_edge(X, Parent, Node, W) || - {Node, {Parent, W, {0, 0}}} - <- gb_trees:to_list(Paths)], + {Node, {Parent, W, {0, 0}}} <- gb_trees:to_list(Paths)], ok; remove_bindings(false, _X, _Bs) -> ok. maybe_add_path(_X, [{root, none}], PathAcc) -> PathAcc; -maybe_add_path(X, Path = [{Node, _} | _], PathAcc) -> +maybe_add_path(X, [{Node, W}, {Parent, _} | _], PathAcc) -> case gb_trees:is_defined(Node, PathAcc) of true -> PathAcc; - false -> gb_trees:insert(Node, path_entry(X, Path), PathAcc) + false -> gb_trees:insert(Node, {Parent, W, {trie_binding_count(X, Node), + trie_child_count(X, Node)}}, + PathAcc) end. decrement_bindings(X, Path, PathAcc) -> - with_path_acc(X, - fun({Bindings, Edges}) -> - {Bindings - 1, Edges} - end, + with_path_acc(X, fun({Bindings, Edges}) -> {Bindings - 1, Edges} end, Path, PathAcc). decrement_edges(X, Path, PathAcc) -> - with_path_acc(X, - fun({Bindings, Edges}) -> - {Bindings, Edges - 1} - end, + with_path_acc(X, fun({Bindings, Edges}) -> {Bindings, Edges - 1} end, Path, PathAcc). with_path_acc(_X, _Fun, [{root, none}], PathAcc) -> @@ -120,18 +114,11 @@ with_path_acc(X, Fun, [{Node, _} | ParentPath], PathAcc) -> NewCounts = Fun(Counts), NewPathAcc = gb_trees:update(Node, {Parent, W, NewCounts}, PathAcc), case NewCounts of - {0, 0} -> - decrement_edges(X, ParentPath, - maybe_add_path(X, ParentPath, NewPathAcc)); - _ -> - NewPathAcc + {0, 0} -> decrement_edges(X, ParentPath, + maybe_add_path(X, ParentPath, NewPathAcc)); + _ -> NewPathAcc end. -path_entry(X, Path = [{Node, W}, {Parent, _} | _]) -> - {Parent, W, {trie_binding_count(X, Node), trie_child_count(X, Node)}}. - -binding_path(#binding{source = X, key = K}) -> - follow_down_get_path(X, split_topic_key(K)). assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). -- cgit v1.2.1 From 4fd145f52df6e05353e5b0cfb3b30fb9081a50a7 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 00:14:21 +0000 Subject: close channel when test finishes --- src/rabbit_tests.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 4ad35696..3416fe0d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1290,13 +1290,16 @@ test_confirms() -> after 1000 -> ok end, - %% Delete queue + %% Cleanup rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), receive #'queue.delete_ok'{} -> ok after 1000 -> throw(failed_to_cleanup_queue) end, + unlink(Ch), + ok = rabbit_channel:shutdown(Ch), + passed. test_statistics() -> -- cgit v1.2.1 From 3141efa589d2cb4097e16fd744b1bccf43d6e270 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 09:30:15 +0000 Subject: change if to case Mhm. 
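In outline, the rewrite below swaps a precomputed boolean plus 'if' for a 'case' on the expression itself (placeholder names, not the real channel code): 'orelse' can then short-circuit on Nack without calling gb_sets:is_empty/1, and both outcomes get an explicit branch instead of an 'if ... true ->' catch-all.

%% Before: the emptiness test always runs, and 'if' needs a catch-all clause.
old_form(Nack, Qs) ->
    Empty = gb_sets:is_empty(Qs),
    if Empty orelse Nack -> drop;
       true              -> keep
    end.

%% After: test the expression directly; Nack short-circuits the 'orelse'.
new_form(Nack, Qs) ->
    case Nack orelse gb_sets:is_empty(Qs) of
        true  -> drop;
        false -> keep
    end.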
--- src/rabbit_channel.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 8afa2d8d..f584ff32 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -548,10 +548,10 @@ remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack, Qs1 = gb_sets:del_element(QPid, Qs), %% If QPid somehow died initiating a nack, clear the message from %% internal data-structures. Also, cleanup empty entries. - Empty = gb_sets:is_empty(Qs1), - if (Empty orelse Nack) -> + case (Nack orelse gb_sets:is_empty(Qs1)) of + true -> {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UMQ), UQM1}; - true -> + false -> {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ), UQM1} end. -- cgit v1.2.1 From 22007275cb3d133e047a291510d716b23fe05dfb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 11:17:27 +0000 Subject: Correct upgrade step --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d1307b85..c75ecf86 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -294,7 +294,7 @@ %%---------------------------------------------------------------------------- --rabbit_upgrade({multiple_routing_keys, []}). +-rabbit_upgrade({multiple_routing_keys, local, []}). -ifdef(use_specs). -- cgit v1.2.1 From efc937c34e3984ab25a053ff240d8d8e0034f5b6 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 9 Mar 2011 11:31:47 +0000 Subject: Fixed bug in the QPid tests. My tests were passing because I used an exchange name in both add and remove calls --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index a7d36533..ffd1e583 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -67,7 +67,7 @@ add_binding(true, _Exchange, Binding) -> add_binding(false, _Exchange, _Binding) -> ok. -remove_bindings(true, X, Bs) -> +remove_bindings(true, #exchange{name = X}, Bs) -> %% The remove process is split into two distinct phases. 
In the %% first phase we gather the lists of bindings and edges to %% delete, then in the second phase we process all the -- cgit v1.2.1 From a3f01f3123c3c4b5d5ab6353a5121b5a1d5a999c Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 9 Mar 2011 11:53:39 +0000 Subject: remove misleading comment --- src/rabbit_amqqueue_process.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index cfef08a5..89d2e0cb 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -449,7 +449,6 @@ attempt_delivery(#delivery{txn = none, message = Message, msg_seq_no = MsgSeqNo}, {NeedsConfirming, State = #q{backing_queue = BQ}}) -> - %% must confirm immediately if it has a MsgSeqNo and not NeedsConfirming case NeedsConfirming of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok -- cgit v1.2.1 From fc071687a13dcde2b3431cad21e7ca270c6f2898 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 13:19:10 +0000 Subject: just stashing this whilst I fix something else --- src/rabbit_amqqueue_process.erl | 2 +- src/rabbit_mirror_queue_master.erl | 112 ++++++++++++++++++------------------- 2 files changed, 56 insertions(+), 58 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 0c9eba9d..b3e04337 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -539,7 +539,7 @@ attempt_delivery(#delivery{txn = Txn, deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of - {{invalid, seen}, _, State1} -> + {{invalid, _Bool}, _, State1} -> {true, State1}; {{valid, true}, _, State1} -> {true, State1}; diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 513a8bb5..a1e2a49a 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -102,62 +102,30 @@ purge(State = #state { gm = GM, set_delivered = 0 }}. publish(Msg = #basic_message { id = MsgId }, MsgProps, ChPid, - State = #state { gm = GM, - backing_queue = BQ }) -> - {ok, State1} = - maybe_publish( - fun (BQS) -> - ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), - {ok, BQ:publish(Msg, MsgProps, ChPid, BQS)} - end, MsgId, State), - State1. + State = #state { gm = GM, + seen_status = SS, + backing_queue = BQ, + backing_queue_state = BQS }) -> + false = dict:is_key(MsgId, SS), %% ASSERTION + ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + State #state { backing_queue_state = BQS1 }. publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, - ChPid, State = #state { gm = GM, - backing_queue = BQ }) -> - case maybe_publish( - fun (BQS) -> - ok = gm:broadcast(GM, {publish, {true, AckRequired}, ChPid, - MsgProps, Msg}), - BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS) - end, MsgId, State) of - {ok, State1} -> - %% publish_delivered but we've already published this - %% message. This means that we received the msg when we - %% were a slave but only via GM, not from the - %% channel. - %% - %% If AckRequired then we would have requeued the message - %% upon our promotion to master. Astonishingly, we think - %% we're empty, which means that someone else has already - %% consumed the message post requeue, and now we're about - %% to send it to another consumer. This could not be more - %% wrong. 
- -maybe_publish(Fun, MsgId, State = #state { seen_status = SS, - backing_queue_state = BQS }) -> - %% We will never see {published, ChPid, MsgSeqNo} here. - case dict:find(MsgId, SS) of - error -> - {Result, BQS1} = Fun(BQS), - {Result, State #state { backing_queue_state = BQS1 }}; - {ok, {published, ChPid}} -> - %% It already got published when we were a slave and no - %% confirmation is waiting. amqqueue_process will have - %% recorded if there's a confirm due to arrive, so can - %% delete entry. - {ok, State #state { seen_status = dict:erase(MsgId, SS) }}; - {ok, {confirmed, ChPid}} -> - %% It got confirmed before we became master, but we've - %% only just received the publish from the channel, so - %% couldn't previously know what the msg_seq_no was. Thus - %% confirm now. amqqueue_process will have recorded a - %% confirm is due immediately prior to here (and thus _it_ - %% knows the msg_id -> msg_seq_no mapping). - ok = rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( - self(), ?MODULE, fun (State1) -> {[MsgId], State1} end), - {ok, State #state { seen_status = dict:erase(MsgId, SS) }} - end. + ChPid, State = #state { gm = GM, + backing_queue = BQ, + seen_status = SS, + backing_queue = BQ, + backing_queue_state = BQS }) -> + false = dict:is_key(MsgId, SS), %% ASSERTION + %% Must use confirmed_broadcast here in order to guarantee that + %% all slaves are forced to interpret this publish_delivered at + %% the same point, especially if we die and a slave is promoted. + BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), + ok = gm:confirmed_broadcast( + GM, {publish, {true, AckRequired}, ChPid, MsgProps, Msg}), + BQS1 = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), + State #state { backing_queue_state = BQS1 }. dropwhile(Fun, State = #state { gm = GM, backing_queue = BQ, @@ -281,7 +249,37 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, {MsgIds, BQS1} = BQ:invoke(Mod, Fun, BQS), {MsgIds, State #state { backing_queue_state = BQS1 }}. -validate_message(Message, #state { backing_queue = BQ, +validate_message(Message, #state { seen_status = SS, + backing_queue = BQ, backing_queue_state = BSQ }) -> - %% this will definitely change. - BQ:validate_message(Message, BQS). + %% Here, we need to deal with the possibility that we're about to + %% receive a message that we've already seen when we were a slave + %% (we received it via gm). Thus if we do receive such message now + %% via the channel, there may be a confirm waiting to issue for + %% it. + + %% We will never see {published, ChPid, MsgSeqNo} here. + case dict:find(MsgId, SS) of + error -> + %% We permit the underlying BQ to have a peek at it, but + %% only if we ourselves are not filtering out the msg. + BQ:validate_message(Message, BQS); + {ok, {published, ChPid}} -> + %% It already got published when we were a slave and no + %% confirmation is waiting. amqqueue_process will have, in + %% its msg_id_to_channel mapping, the entry for dealing + %% with the confirm when that comes back in, so the msg is + %% invalid, and we don't need to do anything further here. + {invalid, false}; + {ok, {confirmed, ChPid}} -> + %% It got confirmed before we became master, but we've + %% only just received the publish from the channel, so + %% couldn't previously know what the msg_seq_no was. Thus + %% confirm now. amqqueue_process will have recorded a + %% confirm is due immediately prior to here (and thus _it_ + %% knows the msg_id -> msg_seq_no mapping). 
+ ok = rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( + self(), ?MODULE, fun (State1) -> {[MsgId], State1} end), + {ok, State #state { seen_status = dict:erase(MsgId, SS) }} + end. + -- cgit v1.2.1 From 694a2c6d44074a9541126c1625f6aa8834357272 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 13:22:39 +0000 Subject: save 2 lines: no one cares about the result of deliver_or_enqueue, so don't bother with one --- src/rabbit_amqqueue_process.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 89d2e0cb..54c92dc7 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -483,7 +483,7 @@ attempt_delivery(#delivery{txn = Txn, deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of {true, _, State1} -> - {true, State1}; + State1; {false, NeedsConfirming, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, @@ -492,7 +492,7 @@ deliver_or_enqueue(Delivery, State) -> needs_confirming = (NeedsConfirming =:= eventually)}, BQS), - {false, ensure_ttl_timer(State1#q{backing_queue_state = BQS1})} + ensure_ttl_timer(State1#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -822,8 +822,7 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. gen_server2:reply(From, true), - {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), - noreply(NewState); + noreply(deliver_or_enqueue(Delivery, State)); handle_call({commit, Txn, ChPid}, From, State) -> case lookup_ch(ChPid) of @@ -985,8 +984,7 @@ handle_cast(sync_timeout, State) -> handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. - {_Delivered, NewState} = deliver_or_enqueue(Delivery, State), - noreply(NewState); + noreply(deliver_or_enqueue(Delivery, State)); handle_cast({ack, Txn, AckTags, ChPid}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> -- cgit v1.2.1 From a58c4974332cd71c0ed59a710f0dda67c5002c83 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 15:08:41 +0000 Subject: Sort out and clean the validate_message stuff. It compiles again now... 
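The contract being tidied up in the hunks below, reduced to a minimal sketch (the function and variable names here are assumed, not taken from the patch): the backing queue gets a chance to veto a publish it has already seen via gm, and an invalid verdict is treated as already-delivered, so the caller neither enqueues nor redelivers the message.

%% Sketch only: dispatch on the backing queue's verdict.
attempt_or_filter(Message, BQ, BQS, DeliverFun) ->
    case BQ:validate_message(Message, BQS) of
        {invalid, BQS1} -> {true, BQS1};              %% filtered out: pretend it was delivered
        {valid,   BQS1} -> DeliverFun(Message, BQS1)  %% normal delivery path
    end.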
--- src/rabbit_amqqueue_process.erl | 24 ++++++++++-------------- src/rabbit_mirror_queue_master.erl | 12 ++++++------ src/rabbit_mirror_queue_slave.erl | 14 ++++++++++---- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index eb3b13cc..81e260bd 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -494,7 +494,9 @@ attempt_delivery(#delivery{txn = none, end, case BQ:validate_message(Message, BQS) of {invalid, BQS1} -> - {invalid, NeedsConfirming, State#q{backing_queue_state = BQS1}}; + %% if the message is invalid, we pretend it was delivered + %% fine + {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}; {valid, BQS1} -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -516,7 +518,7 @@ attempt_delivery(#delivery{txn = none, {Delivered, State2} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State#q{backing_queue_state = BQS1}), - {{valid, Delivered}, NeedsConfirming, State2} + {Delivered, NeedsConfirming, State2} end; attempt_delivery(#delivery{txn = Txn, sender = ChPid, @@ -525,22 +527,19 @@ attempt_delivery(#delivery{txn = Txn, backing_queue_state = BQS}}) -> case BQ:validate_message(Message, BQS) of {invalid, BQS1} -> - {invalid, NeedsConfirming, State#q{backing_queue_state = BQS1}}; + {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}; {valid, BQS1} -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, BQS1), - {{valid, true}, NeedsConfirming, - State#q{backing_queue_state = BQS2}} + {true, NeedsConfirming, State#q{backing_queue_state = BQS2}} end. deliver_or_enqueue(Delivery, State) -> case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of - {invalid, _, State1} -> + {true, _, State1} -> State1; - {{valid, true}, _, State1} -> - State1; - {{valid, false}, NeedsConfirming, + {false, NeedsConfirming, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, BQS1 = BQ:publish(Message, @@ -878,12 +877,9 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Valid, _NeedsConfirming, State1} = + {Delivered, _NeedsConfirming, State1} = attempt_delivery(Delivery, record_confirm_message(Delivery, State)), - reply(case Valid of - valid -> true; - invalid -> false - end, State1); + reply(Delivered, State1); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index dd2357bb..704e62c1 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3]). + status/1, invoke/3, validate_message/2]). -export([start/1, stop/0]). 
@@ -113,7 +113,6 @@ publish(Msg = #basic_message { id = MsgId }, MsgProps, ChPid, publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, ChPid, State = #state { gm = GM, - backing_queue = BQ, seen_status = SS, backing_queue = BQ, backing_queue_state = BQS }) -> @@ -252,7 +251,7 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, validate_message(Message = #basic_message { id = MsgId }, State = #state { seen_status = SS, backing_queue = BQ, - backing_queue_state = BSQ }) -> + backing_queue_state = BQS }) -> %% Here, we need to deal with the possibility that we're about to %% receive a message that we've already seen when we were a slave %% (we received it via gm). Thus if we do receive such message now @@ -266,14 +265,15 @@ validate_message(Message = #basic_message { id = MsgId }, %% only if we ourselves are not filtering out the msg. {Result, BQS1} = BQ:validate_message(Message, BQS), {Result, State #state { backing_queue_state = BQS1 }}; - {ok, {published, ChPid}} -> + {ok, {published, _ChPid}} -> %% It already got published when we were a slave and no %% confirmation is waiting. amqqueue_process will have, in %% its msg_id_to_channel mapping, the entry for dealing %% with the confirm when that comes back in. The msg is - %% invalid. We will not see this again, so erase. + %% invalid. We will not see this again, nor will we be + %% further involved in confirming this message, so erase. {invalid, State #state { seen_status = dict:erase(MsgId, SS) }}; - {ok, {confirmed, ChPid}} -> + {ok, {confirmed, _ChPid}} -> %% It got confirmed before we became master, but we've %% only just received the publish from the channel, so %% couldn't previously know what the msg_seq_no was. Thus diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 87ce31d8..68dd50e2 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -128,13 +128,17 @@ handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> %% get promoted then at that point we have no consumers, thus %% 'false' is precisely the correct answer. However, we must be %% careful to _not_ enqueue the message in this case. + + %% Note this is distinct from the case where we receive the msg + %% via gm first, then we're promoted to master, and only then do + %% we receive the msg from the channel. gen_server2:reply(From, false), %% master may deliver it, not us - noreply(maybe_enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, false, State)); handle_call({deliver, Delivery = #delivery {}}, From, State) -> %% Synchronous, "mandatory" delivery mode gen_server2:reply(From, true), %% amqqueue throws away the result anyway - noreply(maybe_enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, true, State)); handle_call({gm_deaths, Deaths}, From, State = #state { q = #amqqueue { name = QueueName }, @@ -170,7 +174,7 @@ handle_cast({gm, Instruction}, State) -> handle_cast({deliver, Delivery = #delivery {}}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. 
- noreply(maybe_enqueue_message(Delivery, State)); + noreply(maybe_enqueue_message(Delivery, true, State)); handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), @@ -438,6 +442,7 @@ maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, msg_seq_no = MsgSeqNo, sender = ChPid }, + EnqueueOnPromotion, State = #state { sender_queues = SQ, msg_id_status = MS }) -> %% We will never see {published, ChPid, MsgSeqNo} here. @@ -447,7 +452,8 @@ maybe_enqueue_message( {ok, MQ1} -> MQ1; error -> queue:new() end, - SQ1 = dict:store(ChPid, queue:in(Delivery, MQ), SQ), + SQ1 = dict:store(ChPid, + queue:in({Delivery, EnqueueOnPromotion}, MQ), SQ), State #state { sender_queues = SQ1 }; {ok, {confirmed, ChPid}} -> %% BQ has confirmed it but we didn't know what the -- cgit v1.2.1 From e52e2d3b2aa322e28bc5e1e37246548de2d3c8a6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 15:12:48 +0000 Subject: fix --- src/rabbit_mirror_queue_slave.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 68dd50e2..1a239880 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -501,9 +501,10 @@ process_instruction( case queue:out(MQ) of {empty, _MQ} -> {SQ, MS1}; - {{value, Delivery = #delivery { - msg_seq_no = MsgSeqNo, - message = #basic_message { id = MsgId } }}, + {{value, {Delivery = #delivery { + msg_seq_no = MsgSeqNo, + message = #basic_message { id = MsgId } }, + _EnqueueOnPromotion}}, MQ1} -> %% We received the msg from the channel %% first. Thus we need to deal with confirms @@ -519,7 +520,7 @@ process_instruction( ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), MS end}; - {{value, #delivery {}}, _MQ1} -> + {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ1} -> %% The instruction was sent to us before we %% were within the mirror_pids within the %% #amqqueue{} record. We'll never receive the -- cgit v1.2.1 From 64fbfeb070380fe31a4242d8f3c0c2384adafd2d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 15:26:30 +0000 Subject: Correct documentation. Code is still wrong though --- src/rabbit_mirror_queue_slave.erl | 46 ++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 1a239880..481c9dd4 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -357,23 +357,35 @@ promote_me(From, #state { q = Q, ok = gm:confirmed_broadcast(GM, heartbeat), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( CPid, BQ, BQS, GM, MS), - %% We have to do the requeue via this init because otherwise we - %% don't have access to the relevent MsgPropsFun. Also, we are - %% already in mnesia as the master queue pid. Thus we cannot just - %% publish stuff by sending it to ourself - we must pass it - %% through to this init, otherwise we can violate ordering - %% constraints. - - %% MTC should contain only entries for which we are still - %% expecting confirms to come back to use from the underlying BQ. - - %% TODO: what do we do with entries in MS that are 'confirmed' - %% already? Well they should end up in the master queue's state, - %% and the confirms should be issued either by the - %% amqqueue_process if 'immediately', or otherwise by the master - %% queue on validate_message?! That's disgusting. 
There's no way - %% validate_message should be side-effecting... though we could at - %% least ensure it's idempotent. Hmm. + + %% We find all the messages that we've received from channels but + %% not from gm, and if they're due to be enqueued on promotion + %% then we pass them to the + %% queue_process:init_with_backing_queue_state to be enqueued. + + %% We also have to requeue messages which are pending acks: the + %% consumers from the master queue have been lost and so these + %% messages need requeuing. They might also be pending + %% confirmation, and indeed they might also be pending arrival of + %% the publication from the channel itself, if we received both + %% the publication and the fetch via gm first! Requeuing doesn't + %% affect confirmations: if the message was previously pending a + %% confirmation then it still will be, under the same msg_id. So + %% as a master, we need to be prepared to filter out the + %% publication of said messages from the channel (validate_message + %% (thus such requeued messages must remain in the msg_id_status + %% which becomes seen_status in the master)). + + %% Then there are messages we already have in the queue, which are + %% not currently pending acknowledgement: + %% 1. Messages we've only received via gm: + %% Filter out subsequent publication from channel through + %% validate_message. Might have to issue confirms then or + %% later, thus queue_process state will have to know that + %% there's a pending confirm. + %% 2. Messages received via both gm and channel: + %% Queue will have to deal with issuing confirms if necessary. + MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), -- cgit v1.2.1 From 34024504caa97ec6f85f4d375537d76d7554f350 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 16:28:52 +0000 Subject: Further improvement of documentation --- src/rabbit_mirror_queue_slave.erl | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 481c9dd4..8c765d3c 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -355,14 +355,12 @@ promote_me(From, #state { q = Q, true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), - MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, MS), %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion %% then we pass them to the %% queue_process:init_with_backing_queue_state to be enqueued. - + %% %% We also have to requeue messages which are pending acks: the %% consumers from the master queue have been lost and so these %% messages need requeuing. They might also be pending @@ -374,8 +372,8 @@ promote_me(From, #state { q = Q, %% as a master, we need to be prepared to filter out the %% publication of said messages from the channel (validate_message %% (thus such requeued messages must remain in the msg_id_status - %% which becomes seen_status in the master)). - + %% (MS) which becomes seen_status (SS) in the master)). + %% %% Then there are messages we already have in the queue, which are %% not currently pending acknowledgement: %% 1. Messages we've only received via gm: @@ -385,6 +383,36 @@ promote_me(From, #state { q = Q, %% there's a pending confirm. %% 2. 
Messages received via both gm and channel: %% Queue will have to deal with issuing confirms if necessary. + %% + %% MS contains the following three entry types: + %% + %% {published, ChPid}: + %% published via gm only; pending arrival of publication from + %% channel, maybe pending confirm. + %% + %% {published, ChPid, MsgSeqNo}: + %% published via gm and channel; pending confirm. + %% + %% {confirmed, ChPid}: + %% published via gm only, and confirmed; pending publication + %% from channel. + %% + %% The middle form only, needs to go through to the queue_process + %% state to form the msg_id_to_channel mapping (MTC). + %% + %% The two outer forms only, need to go to the master state + %% seen_status (SS). + %% + %% No messages that are enqueued from SQ at this point will have + %% entries in MS. + %% + %% Messages that are extracted from MA may have entries in MS, and + %% those messages are then requeued. However, as discussed above, + %% this does not affect MS, nor which bits go through to SS in + %% Master, or MTC in queue_process. + + MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( + CPid, BQ, BQS, GM, MS), MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || -- cgit v1.2.1 From 4bb562f335972ab764433103286ae1fb04dffb9c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 16:39:59 +0000 Subject: There's a chance that might be it --- src/rabbit_mirror_queue_slave.erl | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 8c765d3c..6369e114 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -397,12 +397,12 @@ promote_me(From, #state { q = Q, %% published via gm only, and confirmed; pending publication %% from channel. %% - %% The middle form only, needs to go through to the queue_process - %% state to form the msg_id_to_channel mapping (MTC). - %% %% The two outer forms only, need to go to the master state %% seen_status (SS). %% + %% The middle form only, needs to go through to the queue_process + %% state to form the msg_id_to_channel mapping (MTC). + %% %% No messages that are enqueued from SQ at this point will have %% entries in MS. %% @@ -411,15 +411,21 @@ promote_me(From, #state { q = Q, %% this does not affect MS, nor which bits go through to SS in %% Master, or MTC in queue_process. + SS = dict:filter(fun ({published, _ChPid}) -> true; + ({published, _ChPid, _MsgSeqNo}) -> false; + ({confirmed, _ChPid}) -> true + end, MS), + MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, MS), + CPid, BQ, BQS, GM, SS), - MTC = dict:from_list( - [{MsgId, {ChPid, MsgSeqNo}} || - {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), + MTC = dict:filter(fun ({published, _ChPid}) -> false; + ({published, _ChPid, _MsgSeqNo}) -> true; + ({confirmed, _ChPid}) -> false + end, MS), AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], - Deliveries = lists:append([queue:to_list(PubQ) - || {_ChPid, PubQ} <- dict:to_list(SQ)]), + Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), + {Delivery, true} <- queue:to_list(PubQ)], QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( Q, rabbit_mirror_queue_master, MasterState, RateTRef, AckTags, Deliveries, MTC), -- cgit v1.2.1 From da854e15d58fe71ea5478dccc61f88b2eab9d2a1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 9 Mar 2011 17:03:07 +0000 Subject: or that might be a bit better... 
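Pulling the bookkeeping out of the diff noise, a minimal sketch of the split performed at promotion (the wrapper name is invented; the entry shapes and comprehensions mirror the ones the surrounding patches settle on): the two-element entries stay with the master as seen_status, and only the three-element published-with-msg_seq_no entries feed the queue process's msg_id_to_channel map.

%% Illustration only; MS is the slave's msg_id_status dict.
split_msg_id_status(MS) ->
    SS  = dict:from_list(
            [{MsgId, Status}
             || {MsgId, {Status, _ChPid}} <- dict:to_list(MS),
                Status =:= published orelse Status =:= confirmed]),
    MTC = dict:from_list(
            [{MsgId, {ChPid, MsgSeqNo}}
             || {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]),
    {SS, MTC}.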
--- src/rabbit_amqqueue_process.erl | 6 ++---- src/rabbit_mirror_queue_slave.erl | 14 +++++++------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 81e260bd..d8cd510b 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -142,10 +142,8 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, stats_timer = rabbit_event:init_stats_timer(), msg_id_to_channel = MTC})), lists:foldl( - fun (Delivery, StateN) -> - {_Delivered, StateN1} = deliver_or_enqueue(Delivery, StateN), - StateN1 - end, State, Deliveries). + fun (Delivery, StateN) -> deliver_or_enqueue(Delivery, StateN) end, + State, Deliveries). terminate(shutdown, State = #q{backing_queue = BQ}) -> terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 6369e114..57ddf8db 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -222,7 +222,7 @@ terminate(Reason, #state { q = Q, rate_timer_ref = RateTRef }) -> ok = gm:leave(GM), QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( - Q, BQ, BQS, RateTRef, [], []), + Q, BQ, BQS, RateTRef, [], [], dict:new()), rabbit_amqqueue_process:terminate(Reason, QueueState); terminate([_SPid], _Reason) -> %% gm case @@ -411,17 +411,17 @@ promote_me(From, #state { q = Q, %% this does not affect MS, nor which bits go through to SS in %% Master, or MTC in queue_process. - SS = dict:filter(fun ({published, _ChPid}) -> true; - ({published, _ChPid, _MsgSeqNo}) -> false; - ({confirmed, _ChPid}) -> true + SS = dict:filter(fun (_MsgId, {published, _ChPid}) -> true; + (_MsgId, {published, _ChPid, _MsgSeqNo}) -> false; + (_MsgId, {confirmed, _ChPid}) -> true end, MS), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( CPid, BQ, BQS, GM, SS), - MTC = dict:filter(fun ({published, _ChPid}) -> false; - ({published, _ChPid, _MsgSeqNo}) -> true; - ({confirmed, _ChPid}) -> false + MTC = dict:filter(fun (_MsgId, {published, _ChPid}) -> false; + (_MsgId, {published, _ChPid, _MsgSeqNo}) -> true; + (_MsgId, {confirmed, _ChPid}) -> false end, MS), AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), -- cgit v1.2.1 From 2ca5a6c3ce467184008067dd5f53876327ce9f01 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 10 Mar 2011 12:51:49 +0000 Subject: It was right before... 
--- src/rabbit_mirror_queue_slave.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 57ddf8db..950df509 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -419,10 +419,10 @@ promote_me(From, #state { q = Q, MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( CPid, BQ, BQS, GM, SS), - MTC = dict:filter(fun (_MsgId, {published, _ChPid}) -> false; - (_MsgId, {published, _ChPid, _MsgSeqNo}) -> true; - (_MsgId, {confirmed, _ChPid}) -> false - end, MS), + + MTC = dict:from_list( + [{MsgId, {ChPid, MsgSeqNo}} || + {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), {Delivery, true} <- queue:to_list(PubQ)], -- cgit v1.2.1 From 5f11273b7379510d20b3792d30b8a0c9481353ef Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 10 Mar 2011 13:50:11 +0000 Subject: Make creation of slaves synchronous. This means we are guaranteed on queue.declare to have all slaves up by the time of the _ok --- src/rabbit_mirror_queue_coordinator.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 30fd6ed3..bd77c976 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -39,7 +39,7 @@ start_link(Queue, GM) -> gen_server2:start_link(?MODULE, [Queue, GM], []). add_slave(CPid, SlaveNode) -> - gen_server2:cast(CPid, {add_slave, SlaveNode}). + gen_server2:call(CPid, {add_slave, SlaveNode}, infinity). get_gm(CPid) -> gen_server2:call(CPid, get_gm, infinity). @@ -67,9 +67,9 @@ init([#amqqueue { name = QueueName } = Q, GM]) -> {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. handle_call(get_gm, _From, State = #state { gm = GM }) -> - reply(GM, State). + reply(GM, State); -handle_cast({add_slave, Node}, State = #state { q = Q }) -> +handle_call({add_slave, Node}, _From, State = #state { q = Q }) -> Nodes = nodes(), case lists:member(Node, Nodes) of true -> @@ -81,7 +81,7 @@ handle_cast({add_slave, Node}, State = #state { q = Q }) -> "Ignoring request to add slave on node ~p for ~s~n", [Node, rabbit_misc:rs(Q #amqqueue.name)]) end, - noreply(State); + reply(ok, State). handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> -- cgit v1.2.1 From 4013400d6787ac306c1c1c354a6db8d5a283bfba Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 10 Mar 2011 16:26:37 +0000 Subject: discovered another entire class of interleaving opportunities I've not considered. Fortunately, the fix turned out to be simple. ish. 
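Stated as a sketch rather than a patch (the function name is invented; the three cases mirror the fold in the hunks below): a confirm can come back from the backing queue for a message the new master has so far seen only via gm. Such a confirm is held back by flipping the seen_status entry from published to confirmed, and is issued later, when the channel's own publish finally arrives and validate_message/2 finds the confirmed entry.

%% Illustration of the state transition only.
filter_confirm(MsgId, SS) ->
    case dict:find(MsgId, SS) of
        error           -> {issue, SS};                               %% nothing pending: confirm now
        {ok, published} -> {hold,  dict:store(MsgId, confirmed, SS)}; %% channel publish still missing
        {ok, confirmed} -> {issue, SS}                                %% confirms are racy anyway
    end.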
--- src/rabbit_mirror_queue_master.erl | 48 ++++++++++++++++++++++++++++---------- src/rabbit_mirror_queue_slave.erl | 7 +++--- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 704e62c1..388f5ce3 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -243,10 +243,30 @@ status(#state { backing_queue = BQ, backing_queue_state = BQS}) -> invoke(?MODULE, Fun, State) -> Fun(State); -invoke(Mod, Fun, State = #state { backing_queue = BQ, - backing_queue_state = BQS }) -> +invoke(Mod, Fun, State = #state { backing_queue = BQ, + backing_queue_state = BQS, + seen_status = SS }) -> {MsgIds, BQS1} = BQ:invoke(Mod, Fun, BQS), - {MsgIds, State #state { backing_queue_state = BQS1 }}. + {MsgIds1, SS1} = + lists:foldl( + fun (MsgId, {MsgIdsN, SSN}) -> + case dict:find(MsgId, SSN) of + error -> + {[MsgId | MsgIdsN], SSN}; + {ok, published} -> + %% It was published when we were a slave, + %% and we were promoted before we saw the + %% publish from the channel. We still + %% haven't seen the channel publish, and + %% consequently we need to filter out the + %% confirm here. We will issue the confirm + %% when we see the publish from the + %% channel. + {MsgIdsN, dict:store(MsgId, confirmed, SSN)} + end + end, {[], SS}, MsgIds), + {MsgIds1, State #state { backing_queue_state = BQS1, + seen_status = SS1 }}. validate_message(Message = #basic_message { id = MsgId }, State = #state { seen_status = SS, @@ -265,20 +285,24 @@ validate_message(Message = #basic_message { id = MsgId }, %% only if we ourselves are not filtering out the msg. {Result, BQS1} = BQ:validate_message(Message, BQS), {Result, State #state { backing_queue_state = BQS1 }}; - {ok, {published, _ChPid}} -> + {ok, published} -> %% It already got published when we were a slave and no %% confirmation is waiting. amqqueue_process will have, in %% its msg_id_to_channel mapping, the entry for dealing - %% with the confirm when that comes back in. The msg is - %% invalid. We will not see this again, nor will we be + %% with the confirm when that comes back in (it's added + %% immediately prior to calling validate_message). The msg + %% is invalid. We will not see this again, nor will we be %% further involved in confirming this message, so erase. {invalid, State #state { seen_status = dict:erase(MsgId, SS) }}; - {ok, {confirmed, _ChPid}} -> - %% It got confirmed before we became master, but we've - %% only just received the publish from the channel, so - %% couldn't previously know what the msg_seq_no was. Thus - %% confirm now. As above, amqqueue_process will have the - %% entry for the msg_id_to_channel mapping. + {ok, confirmed} -> + %% It got published when we were a slave via gm, and + %% confirmed some time after that (maybe even after + %% promotion), but before we received the publish from the + %% channel, so couldn't previously know what the + %% msg_seq_no was (and thus confirm as a slave). So we + %% need to confirm now. As above, amqqueue_process will + %% have the entry for the msg_id_to_channel mapping added + %% immediately prior to calling validate_message/2. 
ok = rabbit_amqqueue:maybe_run_queue_via_backing_queue_async( self(), ?MODULE, fun (State1) -> {[MsgId], State1} end), {invalid, State #state { seen_status = dict:erase(MsgId, SS) }} diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 950df509..d7f86456 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -411,10 +411,9 @@ promote_me(From, #state { q = Q, %% this does not affect MS, nor which bits go through to SS in %% Master, or MTC in queue_process. - SS = dict:filter(fun (_MsgId, {published, _ChPid}) -> true; - (_MsgId, {published, _ChPid, _MsgSeqNo}) -> false; - (_MsgId, {confirmed, _ChPid}) -> true - end, MS), + SS = dict:from_list([{MsgId, Status} + || {MsgId, {Status, _ChPid}} <- dict:to_list(MS), + Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( CPid, BQ, BQS, GM, SS), -- cgit v1.2.1 From cf5cdaebf4b5364ed2a1a04a26ff2968de42c00b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 10 Mar 2011 16:30:53 +0000 Subject: Remove erroneous loss of state. Whilst this now can cause a memory leak, it only affects the master, the dict is not particularly rich, and it'll only be left with entries for dead channels (most likely channels that were on the old master). Also, we might very well be able to address this through other means --- src/rabbit_mirror_queue_master.erl | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 388f5ce3..54c718b1 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -141,8 +141,7 @@ dropwhile(Fun, State = #state { gm = GM, fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = SetDelivered, - seen_status = SS }) -> + set_delivered = SetDelivered }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), State1 = State #state { backing_queue_state = BQS1 }, case Result of @@ -153,13 +152,8 @@ fetch(AckRequired, State = #state { gm = GM, ok = gm:broadcast(GM, {fetch, AckRequired, MsgId, Remaining}), IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), - SS1 = case SetDelivered + SetDelivered1 of - 1 -> dict:new(); %% transition to empty - _ -> SS - end, {{Message, IsDelivered1, AckTag, Remaining}, - State1 #state { set_delivered = SetDelivered1, - seen_status = SS1 }} + State1 #state { set_delivered = SetDelivered1 }} end. ack(AckTags, State = #state { gm = GM, -- cgit v1.2.1 -- cgit v1.2.1 From 3ec780bc3ee6ab11be8dd920a9c1e2c495176e84 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 11 Mar 2011 11:29:54 +0000 Subject: Take write lock when counting records, reduces the constant factor during delete --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..6dfa1930 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -247,7 +247,7 @@ trie_binding_count(X, Node) -> _ = '_'}). count(Table, Match) -> - length(mnesia:match_object(Table, Match, read)). + length(mnesia:match_object(Table, Match, write)). 
trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, -- cgit v1.2.1 From eb8c2af1781252b6a1eb8d5d65d40f04f589d169 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 11 Mar 2011 12:27:33 +0000 Subject: Add ability to dynamically add slaves --- src/rabbit_control.erl | 6 +++ src/rabbit_mirror_queue_coordinator.erl | 21 +-------- src/rabbit_mirror_queue_master.erl | 2 +- src/rabbit_mirror_queue_misc.erl | 32 ++++++++++++- src/rabbit_mirror_queue_slave.erl | 81 +++++++++++++++------------------ 5 files changed, 77 insertions(+), 65 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index e2c050f5..604b1bfa 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -232,6 +232,12 @@ action(list_queues, Node, Args, Opts, Inform) -> [VHostArg, ArgAtoms]), ArgAtoms); +action(add_queue_mirror, Node, [Queue, MirrorNode], Opts, Inform) -> + Inform("Adding mirror of queue ~p on node ~p~n", [Queue, MirrorNode]), + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + rpc_call(Node, rabbit_mirror_queue_misc, add_slave, + [VHostArg, list_to_binary(Queue), list_to_atom(MirrorNode)]); + action(list_exchanges, Node, Args, Opts, Inform) -> Inform("Listing exchanges", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index bd77c976..5fd07e60 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -16,7 +16,7 @@ -module(rabbit_mirror_queue_coordinator). --export([start_link/2, add_slave/2, get_gm/1]). +-export([start_link/2, get_gm/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). @@ -38,9 +38,6 @@ start_link(Queue, GM) -> gen_server2:start_link(?MODULE, [Queue, GM], []). -add_slave(CPid, SlaveNode) -> - gen_server2:call(CPid, {add_slave, SlaveNode}, infinity). - get_gm(CPid) -> gen_server2:call(CPid, get_gm, infinity). @@ -67,21 +64,7 @@ init([#amqqueue { name = QueueName } = Q, GM]) -> {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. handle_call(get_gm, _From, State = #state { gm = GM }) -> - reply(GM, State); - -handle_call({add_slave, Node}, _From, State = #state { q = Q }) -> - Nodes = nodes(), - case lists:member(Node, Nodes) of - true -> - Result = rabbit_mirror_queue_slave_sup:start_child(Node, [Q]), - rabbit_log:info("Adding slave node for ~s: ~p~n", - [rabbit_misc:rs(Q #amqqueue.name), Result]); - false -> - rabbit_log:info( - "Ignoring request to add slave on node ~p for ~s~n", - [Node, rabbit_misc:rs(Q #amqqueue.name)]) - end, - reply(ok, State). + reply(GM, State). 
handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 54c718b1..c5a2e88a 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -62,7 +62,7 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> _ -> [list_to_atom(binary_to_list(Node)) || {longstr, Node} <- Nodes] end, - [rabbit_mirror_queue_coordinator:add_slave(CPid, Node) || Node <- Nodes1], + [rabbit_mirror_queue_misc:add_slave(Q, Node) || Node <- Nodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover), #state { gm = GM, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 090cb812..23d7c398 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -16,7 +16,7 @@ -module(rabbit_mirror_queue_misc). --export([remove_from_queue/2]). +-export([remove_from_queue/2, add_slave/2, add_slave/3]). -include("rabbit.hrl"). @@ -44,3 +44,33 @@ remove_from_queue(QueueName, DeadPids) -> end end end). + +add_slave(VHostPath, QueueName, MirrorNode) -> + add_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). + +add_slave(Queue, MirrorNode) -> + rabbit_amqqueue:with( + Queue, + fun (#amqqueue { arguments = Args, name = Name, + pid = QPid, mirror_pids = MPids } = Q) -> + case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of + undefined -> + ok; + _ -> + case [MirrorNode || Pid <- [QPid | MPids], + node(Pid) =:= MirrorNode] of + [] -> + Result = + rabbit_mirror_queue_slave_sup:start_child( + MirrorNode, [Q]), + rabbit_log:info("Adding slave node for ~s: ~p~n", + [rabbit_misc:rs(Name), Result]), + case Result of + {ok, _Pid} -> ok; + _ -> Result + end; + [_] -> + {error, queue_already_mirrored_on_node} + end + end + end). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index d7f86456..064dc329 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -76,46 +76,38 @@ init([#amqqueue { name = QueueName } = Q]) -> end, Self = self(), Node = node(), - case rabbit_misc:execute_mnesia_transaction( - fun () -> - [Q1 = #amqqueue { pid = QPid, mirror_pids = MPids }] = - mnesia:read({rabbit_queue, QueueName}), - case [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node] of - [] -> - MPids1 = MPids ++ [Self], - mnesia:write(rabbit_queue, - Q1 #amqqueue { mirror_pids = MPids1 }, - write), - {ok, QPid}; - _ -> - {error, node_already_present} - end - end) of - {ok, MPid} -> - ok = file_handle_cache:register_callback( - rabbit_amqqueue, set_maximum_since_use, [self()]), - ok = rabbit_memory_monitor:register( - self(), {rabbit_amqqueue, set_ram_duration_target, - [self()]}), - {ok, BQ} = application:get_env(backing_queue_module), - BQS = BQ:init(Q, false), - {ok, #state { q = Q, - gm = GM, - master_node = node(MPid), - backing_queue = BQ, - backing_queue_state = BQS, - rate_timer_ref = undefined, - sync_timer_ref = undefined, - - sender_queues = dict:new(), - msg_id_ack = dict:new(), - msg_id_status = dict:new() - }, hibernate, - {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, - ?DESIRED_HIBERNATE}}; - {error, Error} -> - {stop, Error} - end. 
+ {ok, MPid} = + rabbit_misc:execute_mnesia_transaction( + fun () -> + [Q1 = #amqqueue { pid = QPid, mirror_pids = MPids }] = + mnesia:read({rabbit_queue, QueueName}), + %% ASSERTION + [] = [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node], + MPids1 = MPids ++ [Self], + mnesia:write(rabbit_queue, + Q1 #amqqueue { mirror_pids = MPids1 }, + write), + {ok, QPid} + end), + ok = file_handle_cache:register_callback( + rabbit_amqqueue, set_maximum_since_use, [self()]), + ok = rabbit_memory_monitor:register( + self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), + {ok, BQ} = application:get_env(backing_queue_module), + BQS = BQ:init(Q, false), + {ok, #state { q = Q, + gm = GM, + master_node = node(MPid), + backing_queue = BQ, + backing_queue_state = BQS, + rate_timer_ref = undefined, + sync_timer_ref = undefined, + + sender_queues = dict:new(), + msg_id_ack = dict:new(), + msg_id_status = dict:new() + }, hibernate, + {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) -> %% Synchronous, "immediate" delivery mode @@ -578,7 +570,7 @@ process_instruction( State1 = State #state { sender_queues = SQ1, msg_id_status = MS2 }, - %% we probably want to work in BQ:validate_message here + {ok, case Deliver of false -> @@ -649,10 +641,11 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State #state { msg_id_ack = MA1, backing_queue_state = BQS1 }; false -> - %% the only thing we can safely do is nuke out our BQ - %% and MA + %% The only thing we can safely do is nuke out our BQ + %% and MA. The interaction between this and confirms + %% doesn't really bear thinking about... {_Count, BQS1} = BQ:purge(BQS), - {MsgIds, BQS2} = ack_all(BQ, MA, BQS1), + {_MsgIds, BQS2} = ack_all(BQ, MA, BQS1), State #state { msg_id_ack = dict:new(), backing_queue_state = BQS2 } end}; -- cgit v1.2.1 From cebd128e876c49e6d7e91da3ccc10aba1bb3c5b3 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Fri, 11 Mar 2011 14:06:21 +0000 Subject: Add timestamps to error_logger messages --- src/rabbit_error_logger.erl | 3 ++- src/rabbit_misc.erl | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 0120f0d6..33dfcef9 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -69,6 +69,7 @@ publish(_Other, _Format, _Data, _State) -> publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, - #'P_basic'{content_type = <<"text/plain">>}, + #'P_basic'{content_type = <<"text/plain">>, + timestamp = rabbit_misc:timestamp()}, list_to_binary(io_lib:format(Format, Data))), ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index e79a58a1..713498c8 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -52,7 +52,7 @@ unlink_and_capture_exit/1]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0]). +-export([now_ms/0, timestamp/0]). -export([lock_file/1]). -export([const_ok/1, const/1]). -export([ntoa/1, ntoab/1]). @@ -190,6 +190,7 @@ {bad_edge, [digraph:vertex()]}), digraph:vertex(), digraph:vertex()})). -spec(now_ms/0 :: () -> non_neg_integer()). +-spec(timestamp/0 ::() -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). -spec(const_ok/1 :: (any()) -> 'ok'). -spec(const/1 :: (A) -> const(A)). @@ -199,6 +200,7 @@ -endif. 
+-define(EPOCH, {{1970, 1, 1}, {0, 0, 0}}). %%---------------------------------------------------------------------------- method_record_type(Record) -> @@ -791,6 +793,10 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. +timestamp() -> + calendar:datetime_to_gregorian_seconds(erlang:universaltime()) - + calendar:datetime_to_gregorian_seconds(?EPOCH). + module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> -- cgit v1.2.1 From 2eac13788895c688a19e27a30f26001cf489491a Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 11 Mar 2011 15:50:12 +0000 Subject: Backed out changeset 2ac4e46ab7c0 changing the lock kind may have undesirable effects in a concurrent setting. The efficiency gains don't justify taking that risk, at least not without further investigation. --- src/rabbit_exchange_type_topic.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 6dfa1930..ffd1e583 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -247,7 +247,7 @@ trie_binding_count(X, Node) -> _ = '_'}). count(Table, Match) -> - length(mnesia:match_object(Table, Match, write)). + length(mnesia:match_object(Table, Match, read)). trie_remove_all_edges(X) -> remove_all(rabbit_topic_trie_edge, -- cgit v1.2.1 From 566ff20ae152accafd4992ccae0b007333f366fd Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 11 Mar 2011 16:21:05 +0000 Subject: Make new mirrors magically occur when set to [] and a new node in the cluster appears. Fix various other bits and pieces --- src/rabbit_mirror_queue_master.erl | 36 ++++++++++++++++++----------------- src/rabbit_mirror_queue_misc.erl | 18 +++++++++++++++++- src/rabbit_mirror_queue_slave_sup.erl | 6 ++++++ 3 files changed, 42 insertions(+), 18 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index c5a2e88a..25a1e4b8 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -53,7 +53,7 @@ stop() -> %% Same as start/1. exit({not_valid_for_generic_backing_queue, ?MODULE}). -init(#amqqueue { arguments = Args } = Q, Recover) -> +init(#amqqueue { arguments = Args, name = QName } = Q, Recover) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), @@ -62,7 +62,7 @@ init(#amqqueue { arguments = Args } = Q, Recover) -> _ -> [list_to_atom(binary_to_list(Node)) || {longstr, Node} <- Nodes] end, - [rabbit_mirror_queue_misc:add_slave(Q, Node) || Node <- Nodes1], + [rabbit_mirror_queue_misc:add_slave(QName, Node) || Node <- Nodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover), #state { gm = GM, @@ -120,11 +120,11 @@ publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, %% Must use confirmed_broadcast here in order to guarantee that %% all slaves are forced to interpret this publish_delivered at %% the same point, especially if we die and a slave is promoted. - BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), ok = gm:confirmed_broadcast( GM, {publish, {true, AckRequired}, ChPid, MsgProps, Msg}), - BQS1 = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - State #state { backing_queue_state = BQS1 }. 
+ {AckTag, BQS1} = + BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), + {AckTag, State #state { backing_queue_state = BQS1 }}. dropwhile(Fun, State = #state { gm = GM, backing_queue = BQ, @@ -247,20 +247,22 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, case dict:find(MsgId, SSN) of error -> {[MsgId | MsgIdsN], SSN}; - {ok, published} -> - %% It was published when we were a slave, - %% and we were promoted before we saw the - %% publish from the channel. We still - %% haven't seen the channel publish, and - %% consequently we need to filter out the - %% confirm here. We will issue the confirm - %% when we see the publish from the - %% channel. - {MsgIdsN, dict:store(MsgId, confirmed, SSN)} + {ok, published} -> + %% It was published when we were a slave, + %% and we were promoted before we saw the + %% publish from the channel. We still + %% haven't seen the channel publish, and + %% consequently we need to filter out the + %% confirm here. We will issue the confirm + %% when we see the publish from the channel. + {MsgIdsN, dict:store(MsgId, confirmed, SSN)}; + {ok, confirmed} -> + %% Well, confirms are racy by definition. + {[MsgId | MsgIdsN], SSN} end - end, {[], SS}, MsgIds), + end, {[], SS}, MsgIds), {MsgIds1, State #state { backing_queue_state = BQS1, - seen_status = SS1 }}. + seen_status = SS1 }}. validate_message(Message = #basic_message { id = MsgId }, State = #state { seen_status = SS, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 23d7c398..51c2a28a 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -16,7 +16,7 @@ -module(rabbit_mirror_queue_misc). --export([remove_from_queue/2, add_slave/2, add_slave/3]). +-export([remove_from_queue/2, add_slave/2, add_slave/3, on_node_up/0]). -include("rabbit.hrl"). @@ -74,3 +74,19 @@ add_slave(Queue, MirrorNode) -> end end end). + +on_node_up() -> + Qs = + rabbit_misc:execute_mnesia_transaction( + fun () -> + mnesia:foldl( + fun (#amqqueue{ arguments = Args, name = QName }, QsN) -> + case rabbit_misc:table_lookup( + Args, <<"x-mirror">>) of + {_Type, []} -> [QName | QsN]; + _ -> QsN + end + end, [], rabbit_queue) + end), + [add_slave(Q, node()) || Q <- Qs], + ok. diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 80c0520c..2fb3be51 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -22,6 +22,12 @@ {requires, queue_sup_queue_recovery}, {enables, routing_ready}]}). +-rabbit_boot_step({mirrored_queues, + [{description, "adding mirrors to queues"}, + {mfa, {rabbit_mirror_queue_misc, on_node_up, []}}, + {requires, mirror_queue_slave_sup}, + {enables, routing_ready}]}). + -behaviour(supervisor2). -export([start/0, start_link/0, start_child/2]). 
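As an illustration of the x-mirror handling introduced above, here is a minimal, self-contained sketch of the per-queue decision the code is building towards: an empty <<"x-mirror">> node list is taken to mean "mirror on any node that appears", otherwise the local node must be named in the list. The module name, wants_slave_here/2 and the local table_lookup/2 helper (a stand-in for rabbit_misc:table_lookup/2) are invented for this example and are not part of the patches.

    -module(mirror_args_example).
    -export([wants_slave_here/2]).

    %% Args is an AMQP-style argument table: a list of {Key, Type, Value}
    %% entries, e.g. [{<<"x-mirror">>, array, [{longstr, <<"hare@host">>}]}].
    wants_slave_here(Args, Node) ->
        case table_lookup(Args, <<"x-mirror">>) of
            {_Type, []}    -> true;   %% empty list: mirror everywhere
            {_Type, Nodes} -> lists:member(
                                Node,
                                [list_to_atom(binary_to_list(N))
                                 || {longstr, N} <- Nodes]);
            undefined      -> false   %% not a mirrored queue
        end.

    %% Local stand-in for rabbit_misc:table_lookup/2.
    table_lookup(Table, Key) ->
        case lists:keysearch(Key, 1, Table) of
            {value, {_K, Type, Value}} -> {Type, Value};
            false                      -> undefined
        end.

For example, wants_slave_here([{<<"x-mirror">>, array, []}], node()) is true, matching the behaviour the commit message describes for queues whose mirror list is set to [].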
-- cgit v1.2.1 From 279e858cac439e493fe3990a4ef0ef689a0ff29b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 11 Mar 2011 17:19:24 +0000 Subject: Make slaves rejoin on boot --- src/rabbit_mirror_queue_misc.erl | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 51c2a28a..bf341c74 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -83,8 +83,17 @@ on_node_up() -> fun (#amqqueue{ arguments = Args, name = QName }, QsN) -> case rabbit_misc:table_lookup( Args, <<"x-mirror">>) of - {_Type, []} -> [QName | QsN]; - _ -> QsN + {_Type, []} -> + [QName | QsN]; + {_Type, Nodes} -> + Nodes1 = [list_to_atom(binary_to_list(Node)) + || {longstr, Node} <- Nodes], + case lists:member(node(), Nodes1) of + true -> [QName | QsN]; + false -> QsN + end; + _ -> + QsN end end, [], rabbit_queue) end), -- cgit v1.2.1 From 1d60ab309f7de265a4e8cc6ca905685e5ac04af9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 10:48:28 +0000 Subject: Fix --- src/file_handle_cache.erl | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index b26bb988..eed62729 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -970,12 +970,13 @@ queue_fold(Fun, Init, Q) -> filter_pending(Fun, {Count, Queue}) -> {Delta, Queue1} = - queue_fold(fun (Item, {DeltaN, QueueN}) -> - case Fun(Item) of - true -> {DeltaN, queue:in(Item, QueueN)}; - false -> {DeltaN - requested(Item), QueueN} - end - end, {0, queue:new()}, Queue), + queue_fold( + fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) -> + case Fun(Item) of + true -> {DeltaN, queue:in(Item, QueueN)}; + false -> {DeltaN - Requested, QueueN} + end + end, {0, queue:new()}, Queue), {Count + Delta, Queue1}. pending_new() -> @@ -1021,9 +1022,6 @@ adjust_alarm(OldState, NewState) -> end, NewState. -requested({_Kind, _Pid, Requested, _From}) -> - Requested. - process_pending(State = #fhc_state { limit = infinity }) -> State; process_pending(State) -> -- cgit v1.2.1 From 8492c18d696b0d4b384bfdc381d006e421351658 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:30:23 +0000 Subject: Maintain an acceptable level of code quality. --- src/rabbit_amqqueue_process.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 77706117..4f1f50a0 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -217,12 +217,11 @@ noreply(NewState) -> next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> {MsgIds, BQS1} = BQ:drain_confirmed(BQS), - BQNeedsSync = BQ:needs_idle_timeout(BQS1), State1 = ensure_stats_timer( ensure_rate_timer( confirm_messages(MsgIds, State#q{ backing_queue_state = BQS1}))), - case BQNeedsSync of + case BQ:needs_idle_timeout(BQS1) of true -> {ensure_sync_timer(State1), 0}; false -> {stop_sync_timer(State1), hibernate} end. -- cgit v1.2.1 From 09da0e495c4b327511c8cdbc763787fc409bcb81 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:43:15 +0000 Subject: I think this makes it more comprehensible. To me at least. 
--- src/rabbit_backing_queue.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 73850793..7823a53c 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,8 +39,9 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which can be invoked by the - %% backing queue when an event has occured that requires a + %% 4. an asynchronous callback which can be passed by the + %% backing queue to other processes which need to call back + %% into it when an event has occured that requires a %% state transition. The callback accepts a function from %% state to state. %% 5. a synchronous callback. Same as the asynchronous callback -- cgit v1.2.1 From 802d609c24e331f641e7afe7577676f88c22c46a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 14 Mar 2011 11:51:26 +0000 Subject: I didn't think that implied that it could *only* be used out of process, but let's make it clearer. --- src/rabbit_backing_queue.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 7823a53c..29d9331b 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -42,8 +42,8 @@ behaviour_info(callbacks) -> %% 4. an asynchronous callback which can be passed by the %% backing queue to other processes which need to call back %% into it when an event has occured that requires a - %% state transition. The callback accepts a function from - %% state to state. + %% state transition. Note that it can also be used in process. + %% The callback accepts a function from state to state. %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, -- cgit v1.2.1 From dc873bed6f3634945a8881a88d0d52091018b33d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 12:05:04 +0000 Subject: Adjusted test to hit code path --- src/rabbit_tests.erl | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 9547cae5..c2ed3fb0 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,23 +1628,38 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), + Src = filename:join(TmpDir, "file1"), + Dst = filename:join(TmpDir, "file2"), + Content = <<"foo">>, + CopyFun = fun () -> + ok = file:write_file(Src, Content), + {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), + {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), + Size = size(Content), + {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), + ok = file_handle_cache:delete(SrcHdl), + ok = file_handle_cache:delete(DstHdl) + end, Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( filename:join(TmpDir, "file3"), [write], []), - receive close -> ok end, - file_handle_cache:delete(Hdl) + receive {next, Pid1} -> Pid1 ! {next, self()} end, + file_handle_cache:delete(Hdl), + %% This will block and never return, so we + %% exercise the fhc tidying up the pending + %% queue on the death of a process. 
+ ok = CopyFun() end), - Src = filename:join(TmpDir, "file1"), - Dst = filename:join(TmpDir, "file2"), - Content = <<"foo">>, - ok = file:write_file(Src, Content), - {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), - {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), - Size = size(Content), - {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size), - ok = file_handle_cache:delete(SrcHdl), - file_handle_cache:delete(DstHdl), - Pid ! close, + ok = CopyFun(), + ok = file_handle_cache:set_limit(3), + Pid ! {next, self()}, + receive {next, Pid} -> ok end, + erlang:monitor(process, Pid), + timer:sleep(500), + exit(Pid, kill), + receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, + file:delete(Src), + file:delete(Dst), ok = file_handle_cache:set_limit(Limit), passed. -- cgit v1.2.1 From 80854415c2e1579d52127b9722c985c77d0791e4 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 12:09:43 +0000 Subject: more docs --- src/rabbit_backing_queue.erl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 29d9331b..a15ff846 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,11 +39,13 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which can be passed by the - %% backing queue to other processes which need to call back - %% into it when an event has occured that requires a - %% state transition. Note that it can also be used in process. - %% The callback accepts a function from state to state. + %% 4. an asynchronous callback which accepts a function from + %% state to state and invokes it with the current backing + %% queue state. This is useful for handling events, e.g. when + %% the backing queue does not have its own process to receive + %% such events, or when the processing of an event results in + %% a state transition the queue logic needs to know about + %% (such as messages getting confirmed). %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, -- cgit v1.2.1 From e4edc17159a885dc118938ccff3ffe5da93160d6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 16:56:52 +0000 Subject: Whoops - add missing catch case which I just hit in testing... --- src/gm.erl | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index fd8d9b77..8cf22581 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -931,6 +931,12 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group) -> prune_or_create_group(Self, GroupName)); Alive -> Left = lists:nth(random:uniform(length(Alive)), Alive), + Handler = + fun () -> + join_group( + Self, GroupName, + record_dead_member_in_group(Left, GroupName)) + end, try case gen_server2:call( Left, {add_on_right, Self}, infinity) of @@ -940,9 +946,10 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group) -> catch exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown -> - join_group( - Self, GroupName, - record_dead_member_in_group(Left, GroupName)) + Handler(); + exit:{{R, _}, _} + when R =:= nodedown; R =:= shutdown -> + Handler() end end end. 
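The two catch clauses added above correspond to the two shapes a failed gen_server call can exit with: a plain reason such as noproc, normal or shutdown when the callee is already gone or dies cleanly, and a wrapped reason such as {nodedown, Node} or {shutdown, Term} when the failure itself carries a tuple. Below is a minimal sketch of the same pattern against stock gen_server (gm uses gen_server2, but the exit terms have the same shape); the module and function names are invented for the example.

    -module(call_or_fallback_example).
    -export([call_with_fallback/3]).

    %% Try the call; if the target is already dead or its node has gone
    %% away, run the fallback instead of crashing the caller.
    call_with_fallback(Pid, Request, Fallback) ->
        try
            gen_server:call(Pid, Request, infinity)
        catch
            %% callee dead, or terminating with a plain reason
            exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown ->
                Fallback();
            %% reason is itself a tuple, e.g. {nodedown, Node} or
            %% {shutdown, Term}
            exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
                Fallback()
        end.

In join_group/3 above the fallback re-reads the group with the failed member recorded as dead and retries the join against another live member.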
-- cgit v1.2.1 From 0cad8ee6ed090daa4ef510762a7cc24bf7b38a3a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 17:55:47 +0000 Subject: Start the GC before we rebuild the index, and store it in the State --- src/rabbit_msg_store.erl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4f5d2411..1bc4fd6b 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -646,6 +646,15 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit), + {ok, GCPid} = rabbit_msg_store_gc:start_link( + #gc_state { dir = Dir, + index_module = IndexModule, + index_state = IndexState, + file_summary_ets = FileSummaryEts, + file_handles_ets = FileHandlesEts, + msg_store = self() + }), + State = #msstate { dir = Dir, index_module = IndexModule, index_state = IndexState, @@ -657,7 +666,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> sum_valid_data = 0, sum_file_size = 0, pending_gc_completion = orddict:new(), - gc_pid = undefined, + gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, dedup_cache_ets = DedupCacheEts, @@ -680,15 +689,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, Offset} = file_handle_cache:position(CurHdl, Offset), ok = file_handle_cache:truncate(CurHdl), - {ok, GCPid} = rabbit_msg_store_gc:start_link( - #gc_state { dir = Dir, - index_module = IndexModule, - index_state = IndexState, - file_summary_ets = FileSummaryEts, - file_handles_ets = FileHandlesEts, - msg_store = self() - }), - {ok, maybe_compact( State1 #msstate { current_file_handle = CurHdl, gc_pid = GCPid }), hibernate, -- cgit v1.2.1 From a465d0703f0a9269e1a7b9635a7c450ab8ee2e57 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 19:27:20 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 1bc4fd6b..25a20a96 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -689,8 +689,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> {ok, Offset} = file_handle_cache:position(CurHdl, Offset), ok = file_handle_cache:truncate(CurHdl), - {ok, maybe_compact( - State1 #msstate { current_file_handle = CurHdl, gc_pid = GCPid }), + {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }), hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
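The point of moving the rabbit_msg_store_gc:start_link/1 call is ordering: the helper has to exist, and its pid has to be recorded in the state, before the later stages of init that may want it, so gc_pid no longer needs to be patched in at the end. A minimal gen_server sketch of that shape follows; the module, record and helper functions are invented stand-ins, not RabbitMQ APIs.

    -module(init_order_example).
    -behaviour(gen_server).

    -export([start_link/0]).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).

    -record(state, {helper}).

    start_link() -> gen_server:start_link(?MODULE, [], []).

    init([]) ->
        %% Start the helper first and keep hold of its pid...
        {ok, HelperPid} = helper_start_link(),
        %% ...so recovery, and the state we build, can already refer to it.
        ok = recover(HelperPid),
        {ok, #state{helper = HelperPid}}.

    handle_call(_Request, _From, State) -> {reply, ok, State}.
    handle_cast(_Msg, State)            -> {noreply, State}.
    handle_info(_Info, State)           -> {noreply, State}.
    terminate(_Reason, _State)          -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

    %% Dummy stand-ins for the GC process and the index rebuild.
    helper_start_link() -> {ok, spawn_link(fun () -> receive stop -> ok end end)}.
    recover(_HelperPid) -> ok.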
-- cgit v1.2.1 From 9af92c9bf4c5f05d59353206a668c25f2443b7bc Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 14 Mar 2011 19:28:20 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4f5d2411..7cc499d1 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -716,15 +716,15 @@ handle_call(successfully_recovered_state, _From, State) -> reply(State #msstate.successfully_recovered, State); handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, - State = #msstate { dir = Dir, - index_state = IndexState, - index_module = IndexModule, - file_handles_ets = FileHandlesEts, - file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, - cur_file_cache_ets = CurFileCacheEts, - clients = Clients, - gc_pid = GCPid }) -> + State = #msstate { dir = Dir, + index_state = IndexState, + index_module = IndexModule, + file_handles_ets = FileHandlesEts, + file_summary_ets = FileSummaryEts, + dedup_cache_ets = DedupCacheEts, + cur_file_cache_ets = CurFileCacheEts, + clients = Clients, + gc_pid = GCPid }) -> Clients1 = dict:store(CRef, {MsgOnDiskFun, CloseFDsFun}, Clients), reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts}, -- cgit v1.2.1 From 0b06bcaf2eed27870dcdf16e538c88751eee7527 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 23:34:29 +0000 Subject: fix --- src/file_handle_cache.erl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index b26bb988..e8e86c7c 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1149,11 +1149,14 @@ notify_age(CStates, AverageAge) -> end, CStates). notify_age0(Clients, CStates, Required) -> - Notifications = - [CState || CState <- CStates, CState#cstate.callback =/= undefined], - {L1, L2} = lists:split(random:uniform(length(Notifications)), - Notifications), - notify(Clients, Required, L2 ++ L1). + case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of + [] -> + ok; + Notifications -> + {L1, L2} = lists:split(random:uniform(length(Notifications)), + Notifications), + notify(Clients, Required, L2 ++ L1) + end. 
notify(_Clients, _Required, []) -> ok; -- cgit v1.2.1 From 6d7121d192dd59108c58574b63db04ec8a34c345 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 14 Mar 2011 23:48:35 +0000 Subject: Ensure we hit both branches of fhc:filter_pending --- src/rabbit_tests.erl | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index d5956c4c..87c905d7 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,10 +1628,12 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), - Src = filename:join(TmpDir, "file1"), - Dst = filename:join(TmpDir, "file2"), + Src1 = filename:join(TmpDir, "file1"), + Dst1 = filename:join(TmpDir, "file2"), + Src2 = filename:join(TmpDir, "file3"), + Dst2 = filename:join(TmpDir, "file4"), Content = <<"foo">>, - CopyFun = fun () -> + CopyFun = fun (Src, Dst) -> ok = file:write_file(Src, Content), {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), @@ -1648,18 +1650,22 @@ test_file_handle_cache() -> %% This will block and never return, so we %% exercise the fhc tidying up the pending %% queue on the death of a process. - ok = CopyFun() + ok = CopyFun(Src1, Dst1) end), - ok = CopyFun(), - ok = file_handle_cache:set_limit(3), + ok = CopyFun(Src1, Dst1), + ok = file_handle_cache:set_limit(2), Pid ! {next, self()}, receive {next, Pid} -> ok end, + timer:sleep(100), + Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end), + timer:sleep(100), erlang:monitor(process, Pid), - timer:sleep(500), + erlang:monitor(process, Pid1), exit(Pid, kill), + exit(Pid1, kill), receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, - file:delete(Src), - file:delete(Dst), + receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end, + [file:delete(File) || File <- [Src1, Dst1, Src2, Dst2]], ok = file_handle_cache:set_limit(Limit), passed. -- cgit v1.2.1 From cc5413d3b863c14ccca00b2a4feb9c99589da170 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 15 Mar 2011 17:00:28 +0000 Subject: Read segment files in one go --- src/rabbit_queue_index.erl | 79 ++++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 8227e4cd..d3a82fbf 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -545,20 +545,22 @@ expiry_to_binary(Expiry) -> <>. read_pub_record_body(Hdl) -> case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of - {ok, Bin} -> - %% work around for binary data fragmentation. See - %% rabbit_msg_file:read_next/2 - <> = Bin, - <> = <>, - Exp = case Expiry of - ?NO_EXPIRY -> undefined; - X -> X - end, - {MsgId, #message_properties{expiry = Exp}}; - Error -> - Error + {ok, Bin} -> {MsgId, MsgProps, <<>>} = extract_pub_record_body(Bin), + {MsgId, MsgProps}; + Error -> Error end. +extract_pub_record_body(<>) -> + %% work around for binary data fragmentation. See + %% rabbit_msg_file:read_next/2 + <> = <>, + Exp = case Expiry of + ?NO_EXPIRY -> undefined; + X -> X + end, + {MsgId, #message_properties{expiry = Exp}, Rest}. 
+ %%---------------------------------------------------------------------------- %% journal manipulation %%---------------------------------------------------------------------------- @@ -845,36 +847,37 @@ load_segment(KeepAcked, #segment { path = Path }) -> false -> {array_new(), 0}; true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), - Res = load_segment_entries(KeepAcked, Hdl, array_new(), 0), + {ok, SegData} = file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE), + Res = load_segment_entries(KeepAcked, SegData, array_new(), 0), ok = file_handle_cache:close(Hdl), Res end. -load_segment_entries(KeepAcked, Hdl, SegEntries, UnackedCount) -> - case file_handle_cache:read(Hdl, ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES) of - {ok, <>} -> - {MsgId, MsgProps} = read_pub_record_body(Hdl), - Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, - SegEntries1 = array:set(RelSeq, Obj, SegEntries), - load_segment_entries(KeepAcked, Hdl, SegEntries1, - UnackedCount + 1); - {ok, <>} -> - {UnackedCountDelta, SegEntries1} = - case array:get(RelSeq, SegEntries) of - {Pub, no_del, no_ack} -> - { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)}; - {Pub, del, no_ack} when KeepAcked -> - {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)}; - {_Pub, del, no_ack} -> - {-1, array:reset(RelSeq, SegEntries)} - end, - load_segment_entries(KeepAcked, Hdl, SegEntries1, - UnackedCount + UnackedCountDelta); - _ErrOrEoF -> - {SegEntries, UnackedCount} - end. +load_segment_entries(KeepAcked, + <>, + SegEntries, UnackedCount) -> + {MsgId, MsgProps, SegData1} = extract_pub_record_body(SegData), + Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, + SegEntries1 = array:set(RelSeq, Obj, SegEntries), + load_segment_entries(KeepAcked, SegData1, SegEntries1, UnackedCount + 1); +load_segment_entries(KeepAcked, + <>, + SegEntries, UnackedCount) -> + {UnackedCountDelta, SegEntries1} = + case array:get(RelSeq, SegEntries) of + {Pub, no_del, no_ack} -> + { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)}; + {Pub, del, no_ack} when KeepAcked -> + {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)}; + {_Pub, del, no_ack} -> + {-1, array:reset(RelSeq, SegEntries)} + end, + load_segment_entries(KeepAcked, SegData, SegEntries1, + UnackedCount + UnackedCountDelta); +load_segment_entries(_KeepAcked, _SegData, SegEntries, UnackedCount) -> + {SegEntries, UnackedCount}. array_new() -> array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]). -- cgit v1.2.1 From 2919b96d6576a0fa2bbe9c075d200c3061027850 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 17:21:19 +0000 Subject: cosmetic --- src/file_handle_cache.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl index e8e86c7c..4f036571 100644 --- a/src/file_handle_cache.erl +++ b/src/file_handle_cache.erl @@ -1150,12 +1150,10 @@ notify_age(CStates, AverageAge) -> notify_age0(Clients, CStates, Required) -> case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of - [] -> - ok; - Notifications -> - {L1, L2} = lists:split(random:uniform(length(Notifications)), - Notifications), - notify(Clients, Required, L2 ++ L1) + [] -> ok; + Notifications -> S = random:uniform(length(Notifications)), + {L1, L2} = lists:split(S, Notifications), + notify(Clients, Required, L2 ++ L1) end. 
notify(_Clients, _Required, []) -> -- cgit v1.2.1 From 0e03d63fa6b9236744374041738f9c59182be325 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 19:26:15 +0000 Subject: initialise #connection.capabilities - not strictly necessary, but good form --- src/rabbit_reader.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 5afe5560..609bb43f 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -201,7 +201,8 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, timeout_sec = ?HANDSHAKE_TIMEOUT, frame_max = ?FRAME_MIN_SIZE, vhost = none, - client_properties = none}, + client_properties = none, + capabilities = []}, callback = uninitialized_callback, recv_length = 0, recv_ref = none, -- cgit v1.2.1 From eb0205d4bc8408d63736b13a6f7e6f5ba2879eee Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 15 Mar 2011 20:00:56 +0000 Subject: inlining and some minor refactoring --- src/rabbit_queue_index.erl | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index d3a82fbf..c342101c 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -543,13 +543,6 @@ create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. -read_pub_record_body(Hdl) -> - case file_handle_cache:read(Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of - {ok, Bin} -> {MsgId, MsgProps, <<>>} = extract_pub_record_body(Bin), - {MsgId, MsgProps}; - Error -> Error - end. - extract_pub_record_body(<>) -> %% work around for binary data fragmentation. See @@ -682,15 +675,18 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> ?ACK_JPREFIX -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> - case read_pub_record_body(Hdl) of - {MsgId, MsgProps} -> - Publish = {MsgId, MsgProps, - case Prefix of - ?PUB_PERSIST_JPREFIX -> true; - ?PUB_TRANS_JPREFIX -> false - end}, + case file_handle_cache:read( + Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of + {ok, Bin} -> + {MsgId, MsgProps, <<>>} = + extract_pub_record_body(Bin), + IsPersistent = case Prefix of + ?PUB_PERSIST_JPREFIX -> true; + ?PUB_TRANS_JPREFIX -> false + end, load_journal_entries( - add_to_journal(SeqId, Publish, State)); + add_to_journal( + SeqId, {MsgId, MsgProps, IsPersistent}, State)); _ErrOrEoF -> %% err, we've lost at least a publish State end -- cgit v1.2.1 From 11681ae6031b69432626e7d92a699b07dd021c95 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 07:18:40 +0000 Subject: cosmetic --- src/rabbit_auth_backend_internal.erl | 24 ++++++++++-------------- src/rabbit_auth_mechanism_cr_demo.erl | 12 +++++------- src/rabbit_auth_mechanism_plain.erl | 5 +---- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index 3d005845..f70813d1 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -85,10 +85,9 @@ check_user_login(Username, []) -> internal_check_user_login(Username, fun(_) -> true end); check_user_login(Username, [{password, Password}]) -> internal_check_user_login( - Username, - fun(#internal_user{password_hash = Hash}) -> - check_password(Password, Hash) - end); + Username, fun(#internal_user{password_hash = Hash}) -> + check_password(Password, Hash) + end); check_user_login(Username, 
AuthProps) -> exit({unknown_auth_props, Username, AuthProps}). @@ -131,12 +130,11 @@ check_resource_access(#user{username = Username}, [] -> false; [#user_permission{permission = P}] -> - PermRegexp = - case element(permission_index(Permission), P) of - %% <<"^$">> breaks Emacs' erlang mode - <<"">> -> <<$^, $$>>; - RE -> RE - end, + PermRegexp = case element(permission_index(Permission), P) of + %% <<"^$">> breaks Emacs' erlang mode + <<"">> -> <<$^, $$>>; + RE -> RE + end, case re:run(Name, PermRegexp, [{capture, none}]) of match -> true; nomatch -> false @@ -221,11 +219,9 @@ salted_md5(Salt, Cleartext) -> Salted = <>, erlang:md5(Salted). -set_admin(Username) -> - set_admin(Username, true). +set_admin(Username) -> set_admin(Username, true). -clear_admin(Username) -> - set_admin(Username, false). +clear_admin(Username) -> set_admin(Username, false). set_admin(Username, IsAdmin) -> R = update_user(Username, fun(User) -> diff --git a/src/rabbit_auth_mechanism_cr_demo.erl b/src/rabbit_auth_mechanism_cr_demo.erl index 77aa34ea..acbb6e48 100644 --- a/src/rabbit_auth_mechanism_cr_demo.erl +++ b/src/rabbit_auth_mechanism_cr_demo.erl @@ -53,10 +53,8 @@ handle_response(Response, State = #state{username = undefined}) -> {challenge, <<"Please tell me your password">>, State#state{username = Response}}; -handle_response(Response, #state{username = Username}) -> - case Response of - <<"My password is ", Password/binary>> -> - rabbit_access_control:check_user_pass_login(Username, Password); - _ -> - {protocol_error, "Invalid response '~s'", [Response]} - end. +handle_response(<<"My password is ", Password/binary>>, + #state{username = Username}) -> + rabbit_access_control:check_user_pass_login(Username, Password); +handle_response(Response, _State) -> + {protocol_error, "Invalid response '~s'", [Response]}. diff --git a/src/rabbit_auth_mechanism_plain.erl b/src/rabbit_auth_mechanism_plain.erl index e2f9bff9..2448acb6 100644 --- a/src/rabbit_auth_mechanism_plain.erl +++ b/src/rabbit_auth_mechanism_plain.erl @@ -65,15 +65,12 @@ extract_user_pass(Response) -> end. extract_elem(<<0:8, Rest/binary>>) -> - Count = next_null_pos(Rest), + Count = next_null_pos(Rest, 0), <> = Rest, {ok, Elem, Rest1}; extract_elem(_) -> error. -next_null_pos(Bin) -> - next_null_pos(Bin, 0). - next_null_pos(<<>>, Count) -> Count; next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count; next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1). 
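For reference, the SASL PLAIN response parsed above is a NUL-delimited binary: an (empty) authorization identity, the username, then the password, each separated by a zero byte. Here is a minimal sketch of the same split, with an invented module and function names; it assumes the leading field is empty, as the code above does, and simply fails with a badmatch on malformed input.

    -module(sasl_plain_example).
    -export([split/1]).

    %% Response layout: <<0, Username/binary, 0, Password/binary>>.
    split(<<0:8, Rest/binary>>) ->
        Len = null_pos(Rest, 0),
        <<User:Len/binary, 0:8, Pass/binary>> = Rest,
        {ok, User, Pass};
    split(_) ->
        error.

    %% Count bytes up to the next zero byte (or the end of the binary).
    null_pos(<<0:8, _/binary>>, N)    -> N;
    null_pos(<<_:8, Rest/binary>>, N) -> null_pos(Rest, N + 1);
    null_pos(<<>>, N)                 -> N.

    %% split(<<0, "guest", 0, "secret">>) yields {ok, <<"guest">>, <<"secret">>}.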
-- cgit v1.2.1 From 8422782861d5a7bef197d048f0b59da14b516d37 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 07:44:29 +0000 Subject: cosmetic --- src/rabbit_queue_index.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index c342101c..75423f80 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -843,7 +843,8 @@ load_segment(KeepAcked, #segment { path = Path }) -> false -> {array_new(), 0}; true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []), {ok, 0} = file_handle_cache:position(Hdl, bof), - {ok, SegData} = file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE), + {ok, SegData} = file_handle_cache:read( + Hdl, ?SEGMENT_TOTAL_SIZE), Res = load_segment_entries(KeepAcked, SegData, array_new(), 0), ok = file_handle_cache:close(Hdl), Res -- cgit v1.2.1 From de97e192c944e6d4e1d1917808f41a5ede61e642 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 09:06:46 +0000 Subject: avoid sub-binary construction of tail and some cosmetics --- src/rabbit_queue_index.erl | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 75423f80..83079ca8 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -140,8 +140,11 @@ -define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes -define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). -%% 16 bytes for md5sum + 8 for expiry + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, ?MSG_ID_BYTES + ?EXPIRY_BYTES + 2). + +%% 16 bytes for md5sum + 8 for expiry +-define(PUBLISH_RECORD_BODY_LENGTH_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). +%% + 2 for seq, bits and prefix +-define(PUBLISH_RECORD_LENGTH_BYTES, (?PUBLISH_RECORD_BODY_LENGTH_BYTES + 2)). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * @@ -537,14 +540,13 @@ queue_index_walker_reader(QueueName, Gatherer) -> %% expiry/binary manipulation %%---------------------------------------------------------------------------- -create_pub_record_body(MsgId, #message_properties{expiry = Expiry}) -> +create_pub_record_body(MsgId, #message_properties { expiry = Expiry }) -> [MsgId, expiry_to_binary(Expiry)]. expiry_to_binary(undefined) -> <>; expiry_to_binary(Expiry) -> <>. -extract_pub_record_body(<>) -> +parse_pub_record_body(<>) -> %% work around for binary data fragmentation. See %% rabbit_msg_file:read_next/2 <> = <>, @@ -552,7 +554,7 @@ extract_pub_record_body(< undefined; X -> X end, - {MsgId, #message_properties{expiry = Exp}, Rest}. + {MsgId, #message_properties { expiry = Exp }}. 
%%---------------------------------------------------------------------------- %% journal manipulation @@ -676,10 +678,9 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> case file_handle_cache:read( - Hdl, ?MSG_ID_BYTES + ?EXPIRY_BYTES) of + Hdl, ?PUBLISH_RECORD_BODY_LENGTH_BYTES) of {ok, Bin} -> - {MsgId, MsgProps, <<>>} = - extract_pub_record_body(Bin), + {MsgId, MsgProps} = parse_pub_record_body(Bin), IsPersistent = case Prefix of ?PUB_PERSIST_JPREFIX -> true; ?PUB_TRANS_JPREFIX -> false @@ -852,12 +853,14 @@ load_segment(KeepAcked, #segment { path = Path }) -> load_segment_entries(KeepAcked, <>, + RelSeq:?REL_SEQ_BITS, + PubRecordBody:?PUBLISH_RECORD_BODY_LENGTH_BYTES/binary, + SegData/binary>>, SegEntries, UnackedCount) -> - {MsgId, MsgProps, SegData1} = extract_pub_record_body(SegData), + {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody), Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack}, SegEntries1 = array:set(RelSeq, Obj, SegEntries), - load_segment_entries(KeepAcked, SegData1, SegEntries1, UnackedCount + 1); + load_segment_entries(KeepAcked, SegData, SegEntries1, UnackedCount + 1); load_segment_entries(KeepAcked, <>, -- cgit v1.2.1 From 7108a811678ee47166482095d7715a7196ed413f Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 16 Mar 2011 09:20:37 +0000 Subject: cosmetic: more consistent naming of constants --- src/rabbit_queue_index.erl | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 83079ca8..33c5391b 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -126,13 +126,13 @@ %% (range: 0 - 16383) -define(REL_SEQ_ONLY_PREFIX, 00). -define(REL_SEQ_ONLY_PREFIX_BITS, 2). --define(REL_SEQ_ONLY_ENTRY_LENGTH_BYTES, 2). +-define(REL_SEQ_ONLY_RECORD_BYTES, 2). %% publish record is binary 1 followed by a bit for is_persistent, %% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits %% of md5sum msg id --define(PUBLISH_PREFIX, 1). --define(PUBLISH_PREFIX_BITS, 1). +-define(PUB_PREFIX, 1). +-define(PUB_PREFIX_BITS, 1). -define(EXPIRY_BYTES, 8). -define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)). @@ -142,14 +142,13 @@ -define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)). %% 16 bytes for md5sum + 8 for expiry --define(PUBLISH_RECORD_BODY_LENGTH_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). +-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)). %% + 2 for seq, bits and prefix --define(PUBLISH_RECORD_LENGTH_BYTES, (?PUBLISH_RECORD_BODY_LENGTH_BYTES + 2)). +-define(PUB_RECORD_BYTES, (?PUB_RECORD_BODY_BYTES + 2)). %% 1 publish, 1 deliver, 1 ack per msg -define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT * - (?PUBLISH_RECORD_LENGTH_BYTES + - (2 * ?REL_SEQ_ONLY_ENTRY_LENGTH_BYTES))). + (?PUB_RECORD_BYTES + (2 * ?REL_SEQ_ONLY_RECORD_BYTES))). 
%% ---- misc ---- @@ -677,8 +676,7 @@ load_journal_entries(State = #qistate { journal_handle = Hdl }) -> ?ACK_JPREFIX -> load_journal_entries(add_to_journal(SeqId, ack, State)); _ -> - case file_handle_cache:read( - Hdl, ?PUBLISH_RECORD_BODY_LENGTH_BYTES) of + case file_handle_cache:read(Hdl, ?PUB_RECORD_BODY_BYTES) of {ok, Bin} -> {MsgId, MsgProps} = parse_pub_record_body(Bin), IsPersistent = case Prefix of @@ -797,7 +795,7 @@ write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) -> ok; {MsgId, MsgProps, IsPersistent} -> file_handle_cache:append( - Hdl, [<>, create_pub_record_body(MsgId, MsgProps)]) @@ -852,9 +850,9 @@ load_segment(KeepAcked, #segment { path = Path }) -> end. load_segment_entries(KeepAcked, - <>, SegEntries, UnackedCount) -> {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody), @@ -1011,11 +1009,11 @@ add_queue_ttl_journal(< stop. -add_queue_ttl_segment(<>) -> - {[<>, MsgId, expiry_to_binary(undefined)], Rest}; + {[<>, + MsgId, expiry_to_binary(undefined)], Rest}; add_queue_ttl_segment(<>) -> {<>, -- cgit v1.2.1 From ade1d061c1f3ac97a02324f121e8ac1b03311ff5 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 16 Mar 2011 10:58:31 +0000 Subject: don't record anything confirm-related for immediate unroutable messages --- src/rabbit_amqqueue_process.erl | 76 +++++++++++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7c4b5190..5dbc8828 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -439,19 +439,24 @@ gb_trees_cons(Key, Value, Tree) -> none -> gb_trees:insert(Key, [Value], Tree) end. -record_confirm_message(#delivery{msg_seq_no = undefined}, State) -> - {never, State}; -record_confirm_message(#delivery{sender = ChPid, +should_confirm_message(#delivery{msg_seq_no = undefined}, _State) -> + never; +should_confirm_message(#delivery{sender = ChPid, msg_seq_no = MsgSeqNo, message = #basic_message { is_persistent = true, id = MsgId}}, - State = #q{q = #amqqueue{durable = true}, - msg_id_to_channel = MTC}) -> - {eventually, - State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}}; -record_confirm_message(_Delivery, State) -> - {immediately, State}. + #q{q = #amqqueue{durable = true}}) -> + {eventually, ChPid, MsgSeqNo, MsgId}; +should_confirm_message(_Delivery, _State) -> + immediately. + +record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, + State = #q{msg_id_to_channel = MTC}) -> + State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; +record_confirm_message(Confirm, State) + when Confirm =:= immediately orelse Confirm =:= never -> + State. run_message_queue(State) -> Funs = {fun deliver_from_queue_pred/2, @@ -466,8 +471,9 @@ attempt_delivery(#delivery{txn = none, sender = ChPid, message = Message, msg_seq_no = MsgSeqNo}, - {NeedsConfirming, State = #q{backing_queue = BQ}}) -> - case NeedsConfirming of + Confirm, + State = #q{backing_queue = BQ}) -> + case Confirm of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, @@ -477,40 +483,50 @@ attempt_delivery(#delivery{txn = none, %% we don't need an expiry here because messages are %% not being enqueued, so we use an empty %% message_properties. 
+ NeedsConfirming = case Confirm of + {eventually, _, _, _} -> true; + _ -> false + end, {AckTag, BQS1} = BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = (NeedsConfirming =:= eventually)}, + needs_confirming = NeedsConfirming}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} end, {Delivered, State1} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), - {Delivered, NeedsConfirming, State1}; + {Delivered, Confirm, State1}; attempt_delivery(#delivery{txn = Txn, sender = ChPid, message = Message}, - {NeedsConfirming, State = #q{backing_queue = BQ, - backing_queue_state = BQS}}) -> + Confirm, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), - {true, NeedsConfirming, State#q{backing_queue_state = BQS1}}. + {true, Confirm, State#q{backing_queue_state = BQS1}}. deliver_or_enqueue(Delivery, State) -> - case attempt_delivery(Delivery, record_confirm_message(Delivery, State)) of - {true, _, State1} -> - State1; - {false, NeedsConfirming, State1 = #q{backing_queue = BQ, + case attempt_delivery(Delivery, + should_confirm_message(Delivery, State), State) of + {true, Confirm, State1} -> + record_confirm_message(Confirm, State1); + {false, Confirm, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, + NeedsConfirming = case Confirm of + {eventually, _, _, _} -> true; + _ -> false + end, BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ - needs_confirming = - (NeedsConfirming =:= eventually)}, + needs_confirming = NeedsConfirming}, BQS), - ensure_ttl_timer(State1#q{backing_queue_state = BQS1}) + State2 = record_confirm_message(Confirm, State1), + ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -829,9 +845,17 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? %% - {Delivered, _NeedsConfirming, State1} = - attempt_delivery(Delivery, record_confirm_message(Delivery, State)), - reply(Delivered, State1); + {Delivered, Confirm, State1} = + attempt_delivery(Delivery, + should_confirm_message(Delivery, State), + State), + State2 = case {Confirm, Delivered} of + {{eventually, _, _, _}, true} -> + record_confirm_message(Confirm, State); + _ -> + State1 + end, + reply(Delivered, State2); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 157d3f401c729df060b32327385211b27a0e0105 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 16 Mar 2011 11:19:17 +0000 Subject: refactor --- src/rabbit_amqqueue_process.erl | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 5dbc8828..96352c13 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -451,6 +451,9 @@ should_confirm_message(#delivery{sender = ChPid, should_confirm_message(_Delivery, _State) -> immediately. +needs_confirming({eventually, _, _, _}) -> true; +needs_confirming(_) -> false. 
+ record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, State = #q{msg_id_to_channel = MTC}) -> State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; @@ -483,15 +486,11 @@ attempt_delivery(#delivery{txn = none, %% we don't need an expiry here because messages are %% not being enqueued, so we use an empty %% message_properties. - NeedsConfirming = case Confirm of - {eventually, _, _, _} -> true; - _ -> false - end, {AckTag, BQS1} = BQ:publish_delivered( AckRequired, Message, (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = NeedsConfirming}, + needs_confirming = needs_confirming(Confirm)}, BQS), {{Message, false, AckTag}, true, State1#q{backing_queue_state = BQS1}} @@ -517,13 +516,9 @@ deliver_or_enqueue(Delivery, State) -> {false, Confirm, State1 = #q{backing_queue = BQ, backing_queue_state = BQS}} -> #delivery{message = Message} = Delivery, - NeedsConfirming = case Confirm of - {eventually, _, _, _} -> true; - _ -> false - end, BQS1 = BQ:publish(Message, (message_properties(State)) #message_properties{ - needs_confirming = NeedsConfirming}, + needs_confirming = needs_confirming(Confirm)}, BQS), State2 = record_confirm_message(Confirm, State1), ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) @@ -849,11 +844,9 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> attempt_delivery(Delivery, should_confirm_message(Delivery, State), State), - State2 = case {Confirm, Delivered} of - {{eventually, _, _, _}, true} -> - record_confirm_message(Confirm, State); - _ -> - State1 + State2 = case Delivered andalso needs_confirming(Confirm) of + true -> record_confirm_message(Confirm, State); + false -> State1 end, reply(Delivered, State2); -- cgit v1.2.1 From 96ed87d84faf2fa623c0d34cdfa4ec4bb32d9ea4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 12:44:13 +0000 Subject: Made gm do batching of messages. This has an astonishing performance impact: if every broadcast msg to the gm results in network activity then performance is low - presumably serialisation of, and network broadcast of small messages is very inefficient. By batching broadcasts and then sending many on a timer, performance is much much higher. --- src/gm.erl | 134 +++++++++++++++++++++++++++++++++++---------------- src/gm_soak_test.erl | 8 +-- 2 files changed, 96 insertions(+), 46 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 8cf22581..5b3623cf 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -376,15 +376,16 @@ confirmed_broadcast/2, group_members/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_info/2]). + code_change/3, prioritise_cast/2, prioritise_info/2]). -export([behaviour_info/1]). --export([table_definitions/0]). +-export([table_definitions/0, flush/1]). -define(GROUP_TABLE, gm_group). -define(HIBERNATE_AFTER_MIN, 1000). -define(DESIRED_HIBERNATE, 10000). +-define(BROADCAST_TIMER, 25). -define(SETS, ordsets). -define(DICT, orddict). @@ -398,7 +399,9 @@ pub_count, members_state, callback_args, - confirms + confirms, + broadcast_buffer, + broadcast_timer }). -record(gm_group, { name, version, members }). @@ -508,21 +511,26 @@ confirmed_broadcast(Server, Msg) -> group_members(Server) -> gen_server2:call(Server, group_members, infinity). +flush(Server) -> + gen_server2:cast(Server, flush). 
+ init([GroupName, Module, Args]) -> random:seed(now()), gen_server2:cast(self(), join), Self = self(), - {ok, #state { self = Self, - left = {Self, undefined}, - right = {Self, undefined}, - group_name = GroupName, - module = Module, - view = undefined, - pub_count = 0, - members_state = undefined, - callback_args = Args, - confirms = queue:new() }, hibernate, + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new(), + broadcast_buffer = [], + broadcast_timer = undefined }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -620,7 +628,11 @@ handle_cast(join, State = #state { self = Self, {Module:joined(Args, all_known_members(View)), State1}); handle_cast(leave, State) -> - {stop, normal, State}. + {stop, normal, State}; + +handle_cast(flush, State) -> + noreply( + flush_broadcast_buffer(State #state { broadcast_timer = undefined })). handle_info({'DOWN', MRef, process, _Pid, _Reason}, @@ -662,14 +674,17 @@ handle_info({'DOWN', MRef, process, _Pid, _Reason}, end. -terminate(Reason, #state { module = Module, - callback_args = Args }) -> +terminate(Reason, State = #state { module = Module, + callback_args = Args }) -> + flush_broadcast_buffer(State), Module:terminate(Args, Reason). code_change(_OldVsn, State, _Extra) -> {ok, State}. +prioritise_cast(flush, _State) -> 1; +prioritise_cast(_ , _State) -> 0. prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; prioritise_info(_ , _State) -> 0. @@ -782,33 +797,62 @@ handle_msg({activity, _NotLeft, _Activity}, State) -> noreply(State) -> - {noreply, State, hibernate}. + {noreply, ensure_broadcast_timer(State), hibernate}. reply(Reply, State) -> - {reply, Reply, State, hibernate}. - -internal_broadcast(Msg, From, State = #state { self = Self, - pub_count = PubCount, - members_state = MembersState, - module = Module, - confirms = Confirms, - callback_args = Args }) -> - PubMsg = {PubCount, Msg}, - Activity = activity_cons(Self, [PubMsg], [], activity_nil()), - ok = maybe_send_activity(activity_finalise(Activity), State), - MembersState1 = - with_member( - fun (Member = #member { pending_ack = PA }) -> - Member #member { pending_ack = queue:in(PubMsg, PA) } - end, Self, MembersState), + {reply, Reply, ensure_broadcast_timer(State), hibernate}. + +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = undefined }) -> + State; +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = TRef }) -> + timer:cancel(TRef), + State #state { broadcast_timer = undefined }; +ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> + {ok, TRef} = timer:apply_after(?BROADCAST_TIMER, ?MODULE, flush, [self()]), + State #state { broadcast_timer = TRef }; +ensure_broadcast_timer(State) -> + State. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + module = Module, + confirms = Confirms, + callback_args = Args, + broadcast_buffer = Buffer }) -> + Result = Module:handle_msg(Args, Self, Msg), + Buffer1 = [{PubCount, Msg} | Buffer], Confirms1 = case From of none -> Confirms; _ -> queue:in({PubCount, From}, Confirms) end, - handle_callback_result({Module:handle_msg(Args, Self, Msg), - State #state { pub_count = PubCount + 1, - members_state = MembersState1, - confirms = Confirms1 }}). 
+ State1 = State #state { pub_count = PubCount + 1, + confirms = Confirms1, + broadcast_buffer = Buffer1 }, + case From =/= none of + true -> + handle_callback_result({Result, flush_broadcast_buffer(State1)}); + false -> + handle_callback_result( + {Result, State1 #state { broadcast_buffer = Buffer1 }}) + end. + +flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) -> + State; +flush_broadcast_buffer(State = #state { self = Self, + members_state = MembersState, + broadcast_buffer = Buffer }) -> + Pubs = lists:reverse(Buffer), + Activity = activity_cons(Self, Pubs, [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = with_member( + fun (Member = #member { pending_ack = PA }) -> + PA1 = queue:join(PA, queue:from_list(Pubs)), + Member #member { pending_ack = PA1 } + end, Self, MembersState), + State #state { members_state = MembersState1, + broadcast_buffer = [] }. %% --------------------------------------------------------------------------- @@ -1093,16 +1137,22 @@ maybe_monitor(Self, Self) -> maybe_monitor(Other, _Self) -> erlang:monitor(process, Other). -check_neighbours(State = #state { self = Self, - left = Left, - right = Right, - view = View }) -> +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View, + broadcast_buffer = Buffer }) -> #view_member { left = VLeft, right = VRight } = fetch_view_member(Self, View), Ver = view_version(View), Left1 = ensure_neighbour(Ver, Self, Left, VLeft), Right1 = ensure_neighbour(Ver, Self, Right, VRight), - State1 = State #state { left = Left1, right = Right1 }, + Buffer1 = case Right1 of + {Self, undefined} -> []; + _ -> Buffer + end, + State1 = State #state { left = Left1, right = Right1, + broadcast_buffer = Buffer1 }, ok = maybe_send_catchup(Right, State1), State1. diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl index 1f8832a6..4e30e1d5 100644 --- a/src/gm_soak_test.erl +++ b/src/gm_soak_test.erl @@ -80,12 +80,12 @@ handle_msg([], From, {test_msg, Num}) -> {ok, Num} -> ok; {ok, Num1} when Num < Num1 -> exit({{from, From}, - {duplicate_delivery_of, Num1}, - {expecting, Num}}); + {duplicate_delivery_of, Num}, + {expecting, Num1}}); {ok, Num1} -> exit({{from, From}, - {missing_delivery_of, Num}, - {received_early, Num1}}); + {received_early, Num}, + {expecting, Num1}}); error -> exit({{from, From}, {received_premature_delivery, Num}}) -- cgit v1.2.1 From a6586a1c333ce2499a787c07ec7cb2c8a2cfc180 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 12:58:11 +0000 Subject: Transplant reader_pid vs connection_pid work from bug23350. --- src/rabbit_channel.erl | 39 ++++++++++++++++++++------------------- src/rabbit_channel_sup.erl | 15 ++++++++------- src/rabbit_direct.erl | 14 +++++++------- 3 files changed, 35 insertions(+), 33 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index da103284..b27f6886 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -20,7 +20,7 @@ -behaviour(gen_server2). --export([start_link/9, do/2, do/3, flush/1, shutdown/1]). +-export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). -export([emit_stats/1, ready_for_close/1]). @@ -29,9 +29,9 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2]). 
--record(ch, {state, protocol, channel, reader_pid, writer_pid, limiter_pid, - start_limiter_fun, transaction_id, tx_participants, next_tag, - uncommitted_ack_q, unacked_message_q, +-record(ch, {state, protocol, channel, reader_pid, writer_pid, connection_pid, + limiter_pid, start_limiter_fun, transaction_id, tx_participants, + next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq, @@ -67,8 +67,8 @@ -type(channel_number() :: non_neg_integer()). --spec(start_link/9 :: - (channel_number(), pid(), pid(), rabbit_types:protocol(), +-spec(start_link/10 :: + (channel_number(), pid(), pid(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) -> rabbit_types:ok_pid_or_error()). @@ -96,11 +96,11 @@ %%---------------------------------------------------------------------------- -start_link(Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, - CollectorPid, StartLimiterFun) -> +start_link(Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, + Capabilities, CollectorPid, StartLimiterFun) -> gen_server2:start_link( - ?MODULE, [Channel, ReaderPid, WriterPid, Protocol, User, VHost, - Capabilities, CollectorPid, StartLimiterFun], []). + ?MODULE, [Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, + VHost, Capabilities, CollectorPid, StartLimiterFun], []). do(Pid, Method) -> do(Pid, Method, none). @@ -154,8 +154,8 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, - CollectorPid, StartLimiterFun]) -> +init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, + Capabilities, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), @@ -164,6 +164,7 @@ init([Channel, ReaderPid, WriterPid, Protocol, User, VHost, Capabilities, channel = Channel, reader_pid = ReaderPid, writer_pid = WriterPid, + connection_pid = ConnectionPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, transaction_id = none, @@ -1410,13 +1411,13 @@ coalesce_and_send(MsgSeqNos, MkMsgFun, infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. 
-i(pid, _) -> self(); -i(connection, #ch{reader_pid = ReaderPid}) -> ReaderPid; -i(number, #ch{channel = Channel}) -> Channel; -i(user, #ch{user = User}) -> User#user.username; -i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; -i(confirm, #ch{confirm_enabled = CE}) -> CE; +i(pid, _) -> self(); +i(connection, #ch{connection_pid = Connection}) -> Connection; +i(number, #ch{channel = Channel}) -> Channel; +i(user, #ch{user = User}) -> User#user.username; +i(vhost, #ch{virtual_host = VHost}) -> VHost; +i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; +i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 8175ad80..7eec0818 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -58,21 +58,22 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, User, VHost, supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ReaderPid, WriterPid, Protocol, User, VHost, - Capabilities, Collector, start_limiter_fun(SupPid)]}, + [Channel, ReaderPid, WriterPid, ReaderPid, Protocol, + User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Channel, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}) -> +start_link({direct, Channel, ClientChannelPid, ConnectionPid, Protocol, User, + VHost, Capabilities, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, Protocol, - User, VHost, Capabilities, Collector, - start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, + ConnectionPid, Protocol, User, VHost, Capabilities, + Collector, start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index a2693c69..568cbea3 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/4, start_channel/7]). +-export([boot/0, connect/4, start_channel/8]). -include("rabbit.hrl"). @@ -28,8 +28,8 @@ -spec(connect/4 :: (binary(), binary(), binary(), rabbit_types:protocol()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). --spec(start_channel/7 :: - (rabbit_channel:channel_number(), pid(), rabbit_types:protocol(), +-spec(start_channel/8 :: + (rabbit_channel:channel_number(), pid(), pid(), rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}). @@ -69,11 +69,11 @@ connect(Username, Password, VHost, Protocol) -> {error, broker_not_found_on_node} end. 
-start_channel(Number, ClientChannelPid, Protocol, User, VHost, Capabilities, - Collector) -> +start_channel(Number, ClientChannelPid, ConnectionPid, Protocol, User, VHost, + Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, Protocol, User, VHost, - Capabilities, Collector}]), + [{direct, Number, ClientChannelPid, ConnectionPid, Protocol, User, + VHost, Capabilities, Collector}]), {ok, ChannelPid}. -- cgit v1.2.1 From 306ae34a68b5a15c789f68d75965ca9e3e8943ad Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:08:22 +0000 Subject: A different and largely opposite version of 'never'... --- src/rabbit_mirror_queue_slave.erl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index d20b00d4..fd501624 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -313,7 +313,6 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> {MS1, CMs} = lists:foldl( fun (MsgId, {MSN, CMsN} = Acc) -> - %% We will never see {confirmed, ChPid} here. case dict:find(MsgId, MSN) of error -> %% If it needed confirming, it'll have @@ -327,7 +326,14 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> %% Seen from both GM and Channel. Can now %% confirm. {dict:erase(MsgId, MSN), - gb_trees_cons(ChPid, MsgSeqNo, CMsN)} + gb_trees_cons(ChPid, MsgSeqNo, CMsN)}; + {ok, {confirmed, ChPid}} -> + %% It's already been confirmed. This is + %% probably it's been both sync'd to disk + %% and then delivered and ack'd before we've + %% seen the publish from the + %% channel. Nothing to do here. + Acc end end, {MS, gb_trees:empty()}, MsgIds), gb_trees:map(fun (ChPid, MsgSeqNos) -> -- cgit v1.2.1 From ff296ce8d6523280e25dbcb81b3fc82bdcaf7bb5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:12:37 +0000 Subject: Use the correct connection pid for exclusivity (and error logging). --- src/rabbit_channel.erl | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b27f6886..19b2eaf4 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -362,14 +362,15 @@ return_ok(State, false, Msg) -> {reply, Msg, State}. ok_msg(true, _Msg) -> undefined; ok_msg(false, Msg) -> Msg. 
-send_exception(Reason, State = #ch{protocol = Protocol, - channel = Channel, - writer_pid = WriterPid, - reader_pid = ReaderPid}) -> +send_exception(Reason, State = #ch{protocol = Protocol, + channel = Channel, + writer_pid = WriterPid, + reader_pid = ReaderPid, + connection_pid = ConnectionPid}) -> {CloseChannel, CloseMethod} = rabbit_binary_generator:map_exception(Channel, Reason, Protocol), rabbit_log:error("connection ~p, channel ~p - error:~n~p~n", - [ReaderPid, Channel, Reason]), + [ConnectionPid, Channel, Reason]), %% something bad's happened: rollback_and_notify may not be 'ok' {_Result, State1} = rollback_and_notify(State), case CloseChannel of @@ -650,13 +651,13 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{writer_pid = WriterPid, - reader_pid = ReaderPid, - next_tag = DeliveryTag}) -> + _, State = #ch{writer_pid = WriterPid, + connection_pid = ConnectionPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:basic_get(Q, self(), NoAck) end) of {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, @@ -690,7 +691,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid, + _, State = #ch{connection_pid = ConnectionPid, limiter_pid = LimiterPid, consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of @@ -707,7 +708,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, %% behalf. This is for symmetry with basic.cancel - see %% the comment in that method for why. 
case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> {rabbit_amqqueue:basic_consume( Q, NoAck, self(), LimiterPid, @@ -922,10 +923,10 @@ handle_method(#'queue.declare'{queue = QueueNameBin, nowait = NoWait, arguments = Args} = Declare, _, State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid, + connection_pid = ConnectionPid, queue_collector_pid = CollectorPid}) -> Owner = case ExclusiveDeclare of - true -> ReaderPid; + true -> ConnectionPid; false -> none end, ActualNameBin = case QueueNameBin of @@ -967,14 +968,14 @@ handle_method(#'queue.declare'{queue = QueueNameBin, handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, - _, State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid}) -> + _, State = #ch{virtual_host = VHostPath, + connection_pid = ConnectionPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = rabbit_amqqueue:with_or_die( QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), - ok = rabbit_amqqueue:check_exclusive_access(Q, ReaderPid), + ok = rabbit_amqqueue:check_exclusive_access(Q, ConnectionPid), return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount, State); @@ -982,11 +983,11 @@ handle_method(#'queue.delete'{queue = QueueNameBin, if_unused = IfUnused, if_empty = IfEmpty, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid}) -> + _, State = #ch{connection_pid = ConnectionPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_configure_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of {error, in_use} -> rabbit_misc:protocol_error( @@ -1018,11 +1019,11 @@ handle_method(#'queue.unbind'{queue = QueueNameBin, handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait}, - _, State = #ch{reader_pid = ReaderPid}) -> + _, State = #ch{connection_pid = ConnectionPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ReaderPid, + QueueName, ConnectionPid, fun (Q) -> rabbit_amqqueue:purge(Q) end), return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); @@ -1142,8 +1143,8 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, - State = #ch{virtual_host = VHostPath, - reader_pid = ReaderPid}) -> + State = #ch{virtual_host = VHostPath, + connection_pid = ConnectionPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1159,7 +1160,8 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, key = ActualRoutingKey, args = Arguments}, fun (_X, Q = #amqqueue{}) -> - try rabbit_amqqueue:check_exclusive_access(Q, ReaderPid) + try rabbit_amqqueue:check_exclusive_access(Q, + ConnectionPid) catch exit:Reason -> {error, Reason} end; (_X, #exchange{}) -> -- cgit v1.2.1 From a65e7a57cb8f5bf4c4fe562d3ac3dae2a06f7ffd Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:15:51 +0000 Subject: Try to make this branch vertical space neutral. 
Sadly we can't do this by planting a vertical space tree. --- src/rabbit_channel.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 19b2eaf4..370654a9 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1144,7 +1144,7 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid }) -> + connection_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1160,8 +1160,7 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, key = ActualRoutingKey, args = Arguments}, fun (_X, Q = #amqqueue{}) -> - try rabbit_amqqueue:check_exclusive_access(Q, - ConnectionPid) + try rabbit_amqqueue:check_exclusive_access(Q, ConnPid) catch exit:Reason -> {error, Reason} end; (_X, #exchange{}) -> -- cgit v1.2.1 From 837e4a8e7328a586ad83707041d36652ac548417 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:16:30 +0000 Subject: whoops --- src/rabbit_mirror_queue_slave.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index fd501624..4a9dc1fe 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -327,7 +327,7 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> %% confirm. {dict:erase(MsgId, MSN), gb_trees_cons(ChPid, MsgSeqNo, CMsN)}; - {ok, {confirmed, ChPid}} -> + {ok, {confirmed, _ChPid}} -> %% It's already been confirmed. This is %% probably it's been both sync'd to disk %% and then delivered and ack'd before we've -- cgit v1.2.1 From 8c14e112021a54adcb32365f40f54c0ae766487e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:19:03 +0000 Subject: minor refactor of test --- src/rabbit_tests.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 87c905d7..505570e2 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1628,10 +1628,8 @@ test_file_handle_cache() -> ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"), ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")), - Src1 = filename:join(TmpDir, "file1"), - Dst1 = filename:join(TmpDir, "file2"), - Src2 = filename:join(TmpDir, "file3"), - Dst2 = filename:join(TmpDir, "file4"), + [Src1, Dst1, Src2, Dst2] = Files = + [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]], Content = <<"foo">>, CopyFun = fun (Src, Dst) -> ok = file:write_file(Src, Content), @@ -1643,7 +1641,7 @@ test_file_handle_cache() -> ok = file_handle_cache:delete(DstHdl) end, Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open( - filename:join(TmpDir, "file3"), + filename:join(TmpDir, "file5"), [write], []), receive {next, Pid1} -> Pid1 ! {next, self()} end, file_handle_cache:delete(Hdl), @@ -1665,7 +1663,7 @@ test_file_handle_cache() -> exit(Pid1, kill), receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end, receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end, - [file:delete(File) || File <- [Src1, Dst1, Src2, Dst2]], + [file:delete(File) || File <- Files], ok = file_handle_cache:set_limit(Limit), passed. 
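A minimal, self-contained sketch of the idiom used in the test refactor above: build every path with a single list comprehension, keep the whole list bound for the cleanup pass, and pattern-match the individual names out of the same expression. The module and file names here are invented purely for illustration and are not part of the patch.

    -module(tmp_files_demo).
    -export([run/1]).

    %% Create two files under TmpDir, copy one onto the other, then delete both.
    run(TmpDir) ->
        [Src, Dst] = Files =
            [filename:join(TmpDir, Name) || Name <- ["demo_src", "demo_dst"]],
        ok = file:write_file(Src, <<"foo">>),
        {ok, _BytesCopied} = file:copy(Src, Dst),
        [ok = file:delete(File) || File <- Files],
        ok.
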
-- cgit v1.2.1 From 7556d6ae1e71ffc07dab7666216e94bbd91c1dec Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 13:36:31 +0000 Subject: refactorings --- src/rabbit_amqqueue_process.erl | 56 +++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 96352c13..4ebdb7a3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -454,11 +454,10 @@ should_confirm_message(_Delivery, _State) -> needs_confirming({eventually, _, _, _}) -> true; needs_confirming(_) -> false. -record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, - State = #q{msg_id_to_channel = MTC}) -> +maybe_record_confirm_message({eventually, ChPid, MsgSeqNo, MsgId}, + State = #q{msg_id_to_channel = MTC}) -> State#q{msg_id_to_channel = dict:store(MsgId, {ChPid, MsgSeqNo}, MTC)}; -record_confirm_message(Confirm, State) - when Confirm =:= immediately orelse Confirm =:= never -> +maybe_record_confirm_message(_Confirm, State) -> State. run_message_queue(State) -> @@ -473,9 +472,9 @@ run_message_queue(State) -> attempt_delivery(#delivery{txn = none, sender = ChPid, message = Message, - msg_seq_no = MsgSeqNo}, - Confirm, + msg_seq_no = MsgSeqNo} = Delivery, State = #q{backing_queue = BQ}) -> + Confirm = should_confirm_message(Delivery, State), case Confirm of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok @@ -500,28 +499,26 @@ attempt_delivery(#delivery{txn = none, {Delivered, Confirm, State1}; attempt_delivery(#delivery{txn = Txn, sender = ChPid, - message = Message}, - Confirm, + message = Message} = Delivery, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), - {true, Confirm, State#q{backing_queue_state = BQS1}}. - -deliver_or_enqueue(Delivery, State) -> - case attempt_delivery(Delivery, - should_confirm_message(Delivery, State), State) of - {true, Confirm, State1} -> - record_confirm_message(Confirm, State1); - {false, Confirm, State1 = #q{backing_queue = BQ, - backing_queue_state = BQS}} -> - #delivery{message = Message} = Delivery, - BQS1 = BQ:publish(Message, - (message_properties(State)) #message_properties{ - needs_confirming = needs_confirming(Confirm)}, - BQS), - State2 = record_confirm_message(Confirm, State1), - ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) + {true, should_confirm_message(Delivery, State), + State#q{backing_queue_state = BQS1}}. + +deliver_or_enqueue(Delivery = #delivery{message = Message}, State) -> + {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), + State2 = #q{backing_queue = BQ, backing_queue_state = BQS} = + maybe_record_confirm_message(Confirm, State1), + case Delivered of + true -> State2; + false -> BQS1 = + BQ:publish(Message, + (message_properties(State)) #message_properties{ + needs_confirming = needs_confirming(Confirm)}, + BQS), + ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> @@ -840,15 +837,8 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% just all ready-to-consume queues get the message, with unready %% queues discarding the message? 
%% - {Delivered, Confirm, State1} = - attempt_delivery(Delivery, - should_confirm_message(Delivery, State), - State), - State2 = case Delivered andalso needs_confirming(Confirm) of - true -> record_confirm_message(Confirm, State); - false -> State1 - end, - reply(Delivered, State2); + {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), + reply(Delivered, maybe_record_confirm_message(Confirm, State1)); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 046d25da345ae888beaf9cc1f4125e596bc5eac1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 13:37:21 +0000 Subject: How could I forget those joyous uses of channel in tests? --- src/rabbit_tests.erl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 930923e8..b8c3f4a9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1121,8 +1121,9 @@ test_server_status() -> %% create a few things so there is some useful information to list Writer = spawn(fun () -> receive shutdown -> ok end end), {ok, Ch} = rabbit_channel:start_link( - 1, self(), Writer, rabbit_framing_amqp_0_9_1, user(<<"user">>), - <<"/">>, [], self(), fun (_) -> {ok, self()} end), + 1, self(), Writer, self(), rabbit_framing_amqp_0_9_1, + user(<<"user">>), <<"/">>, [], self(), + fun (_) -> {ok, self()} end), [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>], {new, Queue = #amqqueue{}} <- [rabbit_amqqueue:declare( @@ -1181,8 +1182,9 @@ test_spawn(Receiver) -> Me = self(), Writer = spawn(fun () -> Receiver(Me) end), {ok, Ch} = rabbit_channel:start_link( - 1, Me, Writer, rabbit_framing_amqp_0_9_1, user(<<"guest">>), - <<"/">>, [], self(), fun (_) -> {ok, self()} end), + 1, Me, Writer, Me, rabbit_framing_amqp_0_9_1, + user(<<"guest">>), <<"/">>, [], self(), + fun (_) -> {ok, self()} end), ok = rabbit_channel:do(Ch, #'channel.open'{}), receive #'channel.open_ok'{} -> ok after 1000 -> throw(failed_to_receive_channel_open_ok) -- cgit v1.2.1 From b87031eaa490a155e8737c0904b2ce9c62542cb8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 16 Mar 2011 14:29:58 +0000 Subject: Say "ConnPid" everywhere. --- src/rabbit_channel.erl | 64 +++++++++++++++++++++++----------------------- src/rabbit_channel_sup.erl | 10 ++++---- src/rabbit_direct.erl | 6 ++--- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 370654a9..2d2d9d60 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -29,7 +29,7 @@ handle_info/2, handle_pre_hibernate/1, prioritise_call/3, prioritise_cast/2]). --record(ch, {state, protocol, channel, reader_pid, writer_pid, connection_pid, +-record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, limiter_pid, start_limiter_fun, transaction_id, tx_participants, next_tag, uncommitted_ack_q, unacked_message_q, user, virtual_host, most_recently_declared_queue, @@ -96,10 +96,10 @@ %%---------------------------------------------------------------------------- -start_link(Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, +start_link(Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun) -> gen_server2:start_link( - ?MODULE, [Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, + ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun], []). 
do(Pid, Method) -> @@ -154,7 +154,7 @@ ready_for_close(Pid) -> %%--------------------------------------------------------------------------- -init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, +init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), @@ -164,7 +164,7 @@ init([Channel, ReaderPid, WriterPid, ConnectionPid, Protocol, User, VHost, channel = Channel, reader_pid = ReaderPid, writer_pid = WriterPid, - connection_pid = ConnectionPid, + conn_pid = ConnPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, transaction_id = none, @@ -362,15 +362,15 @@ return_ok(State, false, Msg) -> {reply, Msg, State}. ok_msg(true, _Msg) -> undefined; ok_msg(false, Msg) -> Msg. -send_exception(Reason, State = #ch{protocol = Protocol, - channel = Channel, - writer_pid = WriterPid, - reader_pid = ReaderPid, - connection_pid = ConnectionPid}) -> +send_exception(Reason, State = #ch{protocol = Protocol, + channel = Channel, + writer_pid = WriterPid, + reader_pid = ReaderPid, + conn_pid = ConnPid}) -> {CloseChannel, CloseMethod} = rabbit_binary_generator:map_exception(Channel, Reason, Protocol), rabbit_log:error("connection ~p, channel ~p - error:~n~p~n", - [ConnectionPid, Channel, Reason]), + [ConnPid, Channel, Reason]), %% something bad's happened: rollback_and_notify may not be 'ok' {_Result, State1} = rollback_and_notify(State), case CloseChannel of @@ -652,12 +652,12 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, _, State = #ch{writer_pid = WriterPid, - connection_pid = ConnectionPid, - next_tag = DeliveryTag}) -> + conn_pid = ConnPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:basic_get(Q, self(), NoAck) end) of {ok, MessageCount, Msg = {_QName, QPid, _MsgId, Redelivered, @@ -691,7 +691,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, no_ack = NoAck, exclusive = ExclusiveConsume, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid, + _, State = #ch{conn_pid = ConnPid, limiter_pid = LimiterPid, consumer_mapping = ConsumerMapping}) -> case dict:find(ConsumerTag, ConsumerMapping) of @@ -708,7 +708,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin, %% behalf. This is for symmetry with basic.cancel - see %% the comment in that method for why. 
case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> {rabbit_amqqueue:basic_consume( Q, NoAck, self(), LimiterPid, @@ -923,10 +923,10 @@ handle_method(#'queue.declare'{queue = QueueNameBin, nowait = NoWait, arguments = Args} = Declare, _, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid, + conn_pid = ConnPid, queue_collector_pid = CollectorPid}) -> Owner = case ExclusiveDeclare of - true -> ConnectionPid; + true -> ConnPid; false -> none end, ActualNameBin = case QueueNameBin of @@ -969,13 +969,13 @@ handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, _, State = #ch{virtual_host = VHostPath, - connection_pid = ConnectionPid}) -> + conn_pid = ConnPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = rabbit_amqqueue:with_or_die( QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), - ok = rabbit_amqqueue:check_exclusive_access(Q, ConnectionPid), + ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid), return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount, State); @@ -983,11 +983,11 @@ handle_method(#'queue.delete'{queue = QueueNameBin, if_unused = IfUnused, if_empty = IfEmpty, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid}) -> + _, State = #ch{conn_pid = ConnPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_configure_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of {error, in_use} -> rabbit_misc:protocol_error( @@ -1019,11 +1019,11 @@ handle_method(#'queue.unbind'{queue = QueueNameBin, handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait}, - _, State = #ch{connection_pid = ConnectionPid}) -> + _, State = #ch{conn_pid = ConnPid}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die( - QueueName, ConnectionPid, + QueueName, ConnPid, fun (Q) -> rabbit_amqqueue:purge(Q) end), return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); @@ -1144,7 +1144,7 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, State = #ch{virtual_host = VHostPath, - connection_pid = ConnPid }) -> + conn_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - @@ -1412,13 +1412,13 @@ coalesce_and_send(MsgSeqNos, MkMsgFun, infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. 
-i(pid, _) -> self(); -i(connection, #ch{connection_pid = Connection}) -> Connection; -i(number, #ch{channel = Channel}) -> Channel; -i(user, #ch{user = User}) -> User#user.username; -i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; -i(confirm, #ch{confirm_enabled = CE}) -> CE; +i(pid, _) -> self(); +i(connection, #ch{conn_pid = ConnPid}) -> ConnPid; +i(number, #ch{channel = Channel}) -> Channel; +i(user, #ch{user = User}) -> User#user.username; +i(vhost, #ch{virtual_host = VHost}) -> VHost; +i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; +i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl index 7eec0818..65ccca02 100644 --- a/src/rabbit_channel_sup.erl +++ b/src/rabbit_channel_sup.erl @@ -64,16 +64,16 @@ start_link({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, User, VHost, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, AState} = rabbit_command_assembler:init(Protocol), {ok, SupPid, {ChannelPid, AState}}; -start_link({direct, Channel, ClientChannelPid, ConnectionPid, Protocol, User, - VHost, Capabilities, Collector}) -> +start_link({direct, Channel, ClientChannelPid, ConnPid, Protocol, User, VHost, + Capabilities, Collector}) -> {ok, SupPid} = supervisor2:start_link(?MODULE, []), {ok, ChannelPid} = supervisor2:start_child( SupPid, {channel, {rabbit_channel, start_link, - [Channel, ClientChannelPid, ClientChannelPid, - ConnectionPid, Protocol, User, VHost, Capabilities, - Collector, start_limiter_fun(SupPid)]}, + [Channel, ClientChannelPid, ClientChannelPid, ConnPid, + Protocol, User, VHost, Capabilities, Collector, + start_limiter_fun(SupPid)]}, intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}), {ok, SupPid, {ChannelPid, none}}. diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 568cbea3..0810c762 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -69,11 +69,11 @@ connect(Username, Password, VHost, Protocol) -> {error, broker_not_found_on_node} end. -start_channel(Number, ClientChannelPid, ConnectionPid, Protocol, User, VHost, +start_channel(Number, ClientChannelPid, ConnPid, Protocol, User, VHost, Capabilities, Collector) -> {ok, _, {ChannelPid, _}} = supervisor2:start_child( rabbit_direct_client_sup, - [{direct, Number, ClientChannelPid, ConnectionPid, Protocol, User, - VHost, Capabilities, Collector}]), + [{direct, Number, ClientChannelPid, ConnPid, Protocol, User, VHost, + Capabilities, Collector}]), {ok, ChannelPid}. 
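The branch above threads a distinct connection pid through rabbit_channel:start_link/10, rabbit_channel_sup and rabbit_direct:start_channel/8. As a rough orientation aid, a hypothetical caller of the widened direct API would now look like the sketch below; the helper name and argument values are placeholders, and it needs a running broker and real pids to actually execute.

    %% Open a network-less ("direct") channel: the client channel pid and the
    %% connection pid are now passed separately, per start_channel/8 above.
    open_direct_channel(Number, ClientChPid, ConnPid, User, Collector) ->
        rabbit_direct:start_channel(
          Number, ClientChPid, ConnPid, rabbit_framing_amqp_0_9_1,
          User, <<"/">>, [], Collector).
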
-- cgit v1.2.1 From f4e4bdc1bfefd70bf3d11b40ecda8f67727d2424 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 15:16:49 +0000 Subject: cosmetic --- src/rabbit_channel.erl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 2d2d9d60..0c12614c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -651,9 +651,9 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{writer_pid = WriterPid, - conn_pid = ConnPid, - next_tag = DeliveryTag}) -> + _, State = #ch{writer_pid = WriterPid, + conn_pid = ConnPid, + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( @@ -968,8 +968,8 @@ handle_method(#'queue.declare'{queue = QueueNameBin, handle_method(#'queue.declare'{queue = QueueNameBin, passive = true, nowait = NoWait}, - _, State = #ch{virtual_host = VHostPath, - conn_pid = ConnPid}) -> + _, State = #ch{virtual_host = VHostPath, + conn_pid = ConnPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = @@ -1143,8 +1143,8 @@ handle_consuming_queue_down(MRef, ConsumerTag, binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin, RoutingKey, Arguments, ReturnMethod, NoWait, - State = #ch{virtual_host = VHostPath, - conn_pid = ConnPid }) -> + State = #ch{virtual_host = VHostPath, + conn_pid = ConnPid }) -> %% FIXME: connection exception (!) on failure?? %% (see rule named "failure" in spec-XML) %% FIXME: don't allow binding to internal exchanges - -- cgit v1.2.1 From ae2e8ee3a60753439654ea6feef90ca7df3a3096 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 17:30:30 +0000 Subject: Abstract and rewrite schema_version handling functions --- src/rabbit_mnesia.erl | 18 +++++---- src/rabbit_upgrade.erl | 96 ++++++++++++++------------------------------- src/rabbit_version.erl | 103 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+), 74 deletions(-) create mode 100644 src/rabbit_version.erl diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index e61f5fce..fa442c9c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -442,7 +442,7 @@ init_db(ClusterNodes, Force) -> {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( - rpc:call(AnotherNode, rabbit_upgrade, read_version, [])), + rpc:call(AnotherNode, rabbit_version, read, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -457,7 +457,8 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_upgrade:write_version() + ok = rabbit_version:write( + rabbit_upgrade:desired_version()) end, ensure_schema_integrity() end; @@ -484,13 +485,14 @@ schema_ok_or_move() -> end. 
ensure_version_ok({ok, DiscVersion}) -> - case rabbit_upgrade:desired_version() of - DiscVersion -> ok; - DesiredVersion -> throw({error, {schema_mismatch, - DesiredVersion, DiscVersion}}) + DesiredVersion = rabbit_upgrade:desired_version(), + case rabbit_version:'=~='(DesiredVersion, DiscVersion) of + true -> ok; + false -> throw({error, {schema_mismatch, + DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_upgrade:write_version(). + ok = rabbit_version:write(rabbit_upgrade:desired_version()). create_schema() -> mnesia:stop(), @@ -500,7 +502,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_upgrade:write_version(). + ok = rabbit_version:write(rabbit_upgrade:desired_version()). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f1134cfa..7a4a4fd8 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -17,8 +17,7 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). --export([read_version/0, write_version/0, desired_version/0, - desired_version/1]). +-export([desired_version/0]). -include("rabbit.hrl"). @@ -30,16 +29,9 @@ -ifdef(use_specs). --type(step() :: atom()). --type(version() :: [{scope(), [step()]}]). --type(scope() :: 'mnesia' | 'local'). - -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(read_version/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec(write_version/0 :: () -> 'ok'). --spec(desired_version/0 :: () -> version()). --spec(desired_version/1 :: (scope()) -> [step()]). +-spec(desired_version/0 :: () -> rabbit_version:version()). -endif. @@ -173,7 +165,7 @@ is_disc_node() -> %% This is pretty ugly but we can't start Mnesia and ask it (will hang), %% we can't look at the config file (may not include us even if we're a %% disc node). - filelib:is_regular(rabbit_mnesia:dir() ++ "/rabbit_durable_exchange.DCD"). + filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). die(Msg, Args) -> %% We don't throw or exit here since that gets thrown @@ -216,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = write_version(mnesia), + ok = write_desired_scope_version(mnesia), ok. nodes_running(Nodes) -> @@ -238,63 +230,37 @@ maybe_upgrade_local() -> fun() -> ok end) end. -read_version() -> - case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, V}; - {error, _} = Err -> Err - end. - -read_version(Scope) -> - case read_version() of - {error, _} = E -> E; - {ok, V} -> {ok, filter_by_scope(Scope, V)} - end. - -write_version() -> - ok = rabbit_misc:write_term_file(schema_filename(), [desired_version()]), - ok. - -write_version(Scope) -> - {ok, V0} = read_version(), - V = flatten([case S of - Scope -> desired_version(S); - _ -> filter_by_scope(S, V0) - end || S <- ?SCOPES]), - ok = rabbit_misc:write_term_file(schema_filename(), [V]), - ok. - -desired_version() -> - flatten([desired_version(Scope) || Scope <- ?SCOPES]). +desired_version() -> [{Scope, desired_version(Scope)} || Scope <- ?SCOPES]. -desired_version(Scope) -> - with_upgrade_graph(fun (G) -> heads(G) end, Scope). +desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). -flatten(LoL) -> - lists:sort(lists:append(LoL)). 
- -filter_by_scope(Scope, Versions) -> - with_upgrade_graph( - fun(G) -> - ScopeVs = digraph:vertices(G), - [V || V <- Versions, lists:member(V, ScopeVs)] - end, Scope). +write_desired_scope_version(Scope) -> + ok = rabbit_version:with_scope_version( + Scope, + fun ({error, Error}) -> + throw({error, {can_not_read_version_to_write_it, Error}}) + end, + fun (_SV) -> {desired_version(Scope), ok} end). %% ------------------------------------------------------------------- upgrades_required(Scope) -> - case read_version(Scope) of - {ok, CurrentHeads} -> - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> upgrades_to_apply(CurrentHeads, G); - Unknown -> throw({error, - {future_upgrades_found, Unknown}}) - end - end, Scope); - {error, enoent} -> - version_not_available - end. + rabbit_version:with_scope_version( + Scope, + fun ({error, enoent}) -> version_not_available end, + fun (CurrentHeads) -> + {CurrentHeads, + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> + upgrades_to_apply(CurrentHeads, G); + Unknown -> + throw({error, + {future_upgrades_found, Unknown}}) + end + end, Scope)} + end). with_upgrade_graph(Fun, Scope) -> case rabbit_misc:build_acyclic_graph( @@ -363,7 +329,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = write_version(Scope), + ok = write_desired_scope_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); @@ -386,8 +352,6 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). -schema_filename() -> filename:join(dir(), ?VERSION_FILENAME). - lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). %% NB: we cannot use rabbit_log here since it may not have been diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl new file mode 100644 index 00000000..c88d57fe --- /dev/null +++ b/src/rabbit_version.erl @@ -0,0 +1,103 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_version). + +-export([read/0, write/1, with_scope_version/3, '=~='/2]). + +%% ------------------------------------------------------------------- +-ifdef(use_specs). + +-export_type([step/0, version/0, scope/0]). + +-type(step() :: atom()). +-type(version() :: [{scope(), [step()]}]). +-type(scope() :: atom()). + +-spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). +-spec(write/1 :: (version()) -> 'ok'). +-spec(with_scope_version/3 :: + (scope(), + fun (({'error', any()}) -> E), + fun (([step()]) -> {[step()], A})) -> E | A). +-spec('=~='/2 :: (version(), version()) -> boolean()). + +-endif. +%% ------------------------------------------------------------------- + +-define(VERSION_FILENAME, "schema_version"). 
+ +%% ------------------------------------------------------------------- + +read() -> + case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, categorise_by_scope(V)}; + {error, _} = Err -> Err + end. + +write(Version) -> + V = [Name || {_Scope, Names} <- Version, Name <- Names], + ok = rabbit_misc:write_term_file(schema_filename(), [V]). + +with_scope_version(Scope, ErrorHandler, Fun) -> + case read() of + {error, _} = Err -> + ErrorHandler(Err); + {ok, Version} -> + SV = case lists:keysearch(Scope, 1, Version) of + false -> []; + {value, {Scope, SV1}} -> SV1 + end, + {SV2, Result} = Fun(SV), + ok = case SV =:= SV2 of + true -> ok; + false -> write(lists:keystore(Scope, 1, Version, + {Scope, SV2})) + end, + Result + end. + +'=~='(VerA, VerB) -> + matches(lists:usort(VerA), lists:usort(VerB)). + +%% ------------------------------------------------------------------- + +matches([], []) -> + true; +matches([{Scope, SV}|VerA], [{Scope, SV}|VerB]) -> + matches(VerA, VerB); +matches([{Scope, SVA}|VerA], [{Scope, SVB}|VerB]) -> + case {lists:usort(SVA), lists:usort(SVB)} of + {SV, SV} -> matches(VerA, VerB); + _ -> false + end; +matches(_VerA, _VerB) -> + false. + +categorise_by_scope(Heads) when is_list(Heads) -> + Categorised = + [{Scope, Name} || {_Module, Attributes} <- + rabbit_misc:all_module_attributes(rabbit_upgrade), + {Name, Scope, _Requires} <- Attributes, + lists:member(Name, Heads)], + orddict:to_list( + lists:foldl(fun ({Scope, Name}, Version) -> + rabbit_misc:orddict_cons(Scope, Name, Version) + end, orddict:new(), Categorised)). + +dir() -> rabbit_mnesia:dir(). + +schema_filename() -> filename:join(dir(), ?VERSION_FILENAME). -- cgit v1.2.1 From 7f05a48a9e4ebbd62d41e3be3d514f689318abeb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 18:02:15 +0000 Subject: english --- src/rabbit_upgrade.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 7a4a4fd8..9b2ffa28 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -238,7 +238,7 @@ write_desired_scope_version(Scope) -> ok = rabbit_version:with_scope_version( Scope, fun ({error, Error}) -> - throw({error, {can_not_read_version_to_write_it, Error}}) + throw({error, {cannot_read_version_to_write_it, Error}}) end, fun (_SV) -> {desired_version(Scope), ok} end). -- cgit v1.2.1 From f10c0b62e57dd2e5d0f2dd877e03dfd699298cc9 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 16 Mar 2011 18:13:19 +0000 Subject: ordering --- src/rabbit_version.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index c88d57fe..8c577f9c 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -23,9 +23,9 @@ -export_type([step/0, version/0, scope/0]). +-type(scope() :: atom()). -type(step() :: atom()). -type(version() :: [{scope(), [step()]}]). --type(scope() :: atom()). -spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). -spec(write/1 :: (version()) -> 'ok'). -- cgit v1.2.1 From 221433535cd1551a83132d0a8d46440dd12ea433 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 00:52:11 +0000 Subject: incorporate qa feedback. The version.erl api is rather nice now: the version itself is entirely opaque - whilst it can be read, there's nothing provided to decompose it at all. 
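For orientation, a hypothetical caller of that opaque API, using only the operations this branch itself relies on (read/0, desired_version/0, '=~='/2 and write_desired_version/0). It mirrors what the rabbit_mnesia hunk below does and is not part of the patch.

    %% Compare the recorded version with the desired one as opaque values;
    %% never pick either of them apart.
    check_schema_version() ->
        Desired = rabbit_version:desired_version(),
        case rabbit_version:read() of
            {ok, Recorded} ->
                case rabbit_version:'=~='(Desired, Recorded) of
                    true  -> ok;
                    false -> {error, {schema_mismatch, Desired, Recorded}}
                end;
            {error, _} ->
                rabbit_version:write_desired_version()
        end.
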
--- src/rabbit_mnesia.erl | 9 ++- src/rabbit_upgrade.erl | 116 +++++++---------------------------- src/rabbit_version.erl | 160 +++++++++++++++++++++++++++++++++++-------------- 3 files changed, 139 insertions(+), 146 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index fa442c9c..4902cfeb 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -457,8 +457,7 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_version:write( - rabbit_upgrade:desired_version()) + ok = rabbit_version:write_desired_version() end, ensure_schema_integrity() end; @@ -485,14 +484,14 @@ schema_ok_or_move() -> end. ensure_version_ok({ok, DiscVersion}) -> - DesiredVersion = rabbit_upgrade:desired_version(), + DesiredVersion = rabbit_version:desired_version(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {schema_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_version:write(rabbit_upgrade:desired_version()). + ok = rabbit_version:write_desired_version(). create_schema() -> mnesia:stop(), @@ -502,7 +501,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_version:write(rabbit_upgrade:desired_version()). + ok = rabbit_version:write_desired_version(). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9b2ffa28..9347cc53 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -17,13 +17,11 @@ -module(rabbit_upgrade). -export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). --export([desired_version/0]). -include("rabbit.hrl"). -define(VERSION_FILENAME, "schema_version"). -define(LOCK_FILENAME, "schema_upgrade_lock"). --define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- @@ -31,7 +29,6 @@ -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(desired_version/0 :: () -> rabbit_version:version()). -endif. @@ -96,8 +93,8 @@ maybe_upgrade_mnesia() -> AllNodes = rabbit_mnesia:all_clustered_nodes(), - case upgrades_required(mnesia) of - version_not_available -> + case rabbit_version:upgrades_required(mnesia) of + {error, version_not_available} -> rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; @@ -105,9 +102,11 @@ maybe_upgrade_mnesia() -> "< 2.1.1.~nUnfortunately you will need to " "rebuild the cluster.", []) end; - [] -> + {error, _} = Err -> + throw(Err); + {ok, []} -> ok; - Upgrades -> + {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); @@ -142,18 +141,19 @@ upgrade_mode(AllNodes) -> end; [Another|_] -> ClusterVersion = - case rpc:call(Another, - rabbit_upgrade, desired_version, [mnesia]) of + case rpc:call(Another, rabbit_version, desired_scope_version, + [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; {badrpc, Reason} -> {unknown, Reason}; V -> V end, - case desired_version(mnesia) of - ClusterVersion -> + MyVersion = rabbit_version:desired_scope_version(mnesia), + case rabbit_version:'=~='(ClusterVersion, MyVersion) of + true -> %% The other node(s) have upgraded already, I am not the %% upgrader secondary; - MyVersion -> + false -> %% The other node(s) are running an unexpected version. 
die("Cluster upgrade needed but other nodes are " "running ~p~nand I want ~p", @@ -208,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = write_desired_scope_version(mnesia), + ok = rabbit_version:write_desired_scope_version(mnesia), ok. nodes_running(Nodes) -> @@ -223,90 +223,14 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - case upgrades_required(local) of - version_not_available -> version_not_available; - [] -> ok; - Upgrades -> apply_upgrades(local, Upgrades, - fun() -> ok end) + case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> ok; + {ok, Upgrades} -> apply_upgrades(local, Upgrades, + fun () -> ok end) end. -desired_version() -> [{Scope, desired_version(Scope)} || Scope <- ?SCOPES]. - -desired_version(Scope) -> with_upgrade_graph(fun (G) -> heads(G) end, Scope). - -write_desired_scope_version(Scope) -> - ok = rabbit_version:with_scope_version( - Scope, - fun ({error, Error}) -> - throw({error, {cannot_read_version_to_write_it, Error}}) - end, - fun (_SV) -> {desired_version(Scope), ok} end). - -%% ------------------------------------------------------------------- - -upgrades_required(Scope) -> - rabbit_version:with_scope_version( - Scope, - fun ({error, enoent}) -> version_not_available end, - fun (CurrentHeads) -> - {CurrentHeads, - with_upgrade_graph( - fun (G) -> - case unknown_heads(CurrentHeads, G) of - [] -> - upgrades_to_apply(CurrentHeads, G); - Unknown -> - throw({error, - {future_upgrades_found, Unknown}}) - end - end, Scope)} - end). - -with_upgrade_graph(Fun, Scope) -> - case rabbit_misc:build_acyclic_graph( - fun (Module, Steps) -> vertices(Module, Steps, Scope) end, - fun (Module, Steps) -> edges(Module, Steps, Scope) end, - rabbit_misc:all_module_attributes(rabbit_upgrade)) of - {ok, G} -> try - Fun(G) - after - true = digraph:delete(G) - end; - {error, {vertex, duplicate, StepName}} -> - throw({error, {duplicate_upgrade_step, StepName}}); - {error, {edge, {bad_vertex, StepName}, _From, _To}} -> - throw({error, {dependency_on_unknown_upgrade_step, StepName}}); - {error, {edge, {bad_edge, StepNames}, _From, _To}} -> - throw({error, {cycle_in_upgrade_steps, StepNames}}) - end. - -vertices(Module, Steps, Scope0) -> - [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, - Scope0 == Scope1]. - -edges(_Module, Steps, Scope0) -> - [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, - Require <- Requires, - Scope0 == Scope1]. -unknown_heads(Heads, G) -> - [H || H <- Heads, digraph:vertex(G, H) =:= false]. - -upgrades_to_apply(Heads, G) -> - %% Take all the vertices which can reach the known heads. That's - %% everything we've already applied. Subtract that from all - %% vertices: that's what we have to apply. - Unsorted = sets:to_list( - sets:subtract( - sets:from_list(digraph:vertices(G)), - sets:from_list(digraph_utils:reaching(Heads, G)))), - %% Form a subgraph from that list and find a topological ordering - %% so we can invoke them in order. - [element(2, digraph:vertex(G, StepName)) || - StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. - -heads(G) -> - lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). 
- %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> @@ -329,7 +253,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = write_desired_scope_version(Scope), + ok = rabbit_version:write_desired_scope_version(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index 8c577f9c..2d7ba8e4 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,86 +16,156 @@ -module(rabbit_version). --export([read/0, write/1, with_scope_version/3, '=~='/2]). +-export([read/0, '=~='/2, desired_version/0, desired_scope_version/1, + write_desired_version/0, write_desired_scope_version/1, + upgrades_required/1]). %% ------------------------------------------------------------------- -ifdef(use_specs). --export_type([step/0, version/0, scope/0]). +-export_type([scope/0, step/0, scope_version/0]). -type(scope() :: atom()). --type(step() :: atom()). --type(version() :: [{scope(), [step()]}]). +-type(scope_version() :: [atom()]). +-type(step() :: {atom(), atom()}). + +-type(version() :: [atom()]). -spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec(write/1 :: (version()) -> 'ok'). --spec(with_scope_version/3 :: - (scope(), - fun (({'error', any()}) -> E), - fun (([step()]) -> {[step()], A})) -> E | A). -spec('=~='/2 :: (version(), version()) -> boolean()). +-spec(desired_version/0 :: () -> version()). +-spec(desired_scope_version/1 :: (scope()) -> scope_version()). +-spec(write_desired_version/0 :: () -> 'ok'). +-spec(write_desired_scope_version/1 :: + (scope()) -> rabbit_types:ok_or_error(any())). +-spec(upgrades_required/1 :: + (scope()) -> rabbit_types:ok_or_error2([step()], any())). -endif. %% ------------------------------------------------------------------- -define(VERSION_FILENAME, "schema_version"). +-define(SCOPES, [mnesia, local]). %% ------------------------------------------------------------------- -read() -> - case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, categorise_by_scope(V)}; - {error, _} = Err -> Err - end. +read() -> case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, V}; + {error, _} = Err -> Err + end. -write(Version) -> - V = [Name || {_Scope, Names} <- Version, Name <- Names], - ok = rabbit_misc:write_term_file(schema_filename(), [V]). +write(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). -with_scope_version(Scope, ErrorHandler, Fun) -> +read_scope_version(Scope) -> case read() of {error, _} = Err -> - ErrorHandler(Err); + Err; {ok, Version} -> - SV = case lists:keysearch(Scope, 1, Version) of + {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of false -> []; {value, {Scope, SV1}} -> SV1 - end, - {SV2, Result} = Fun(SV), - ok = case SV =:= SV2 of - true -> ok; - false -> write(lists:keystore(Scope, 1, Version, - {Scope, SV2})) - end, - Result + end} end. +write_scope_version(Scope, ScopeVersion) -> + case read() of + {error, _} = Err -> + Err; + {ok, Version} -> + Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version), + {Scope, ScopeVersion}), + ok = write([Name || {_Scope, Names} <- Version1, Name <- Names]) + end. 
+ +%% ------------------------------------------------------------------- + '=~='(VerA, VerB) -> - matches(lists:usort(VerA), lists:usort(VerB)). + lists:usort(VerA) =:= lists:usort(VerB). + +%% ------------------------------------------------------------------- + +desired_version() -> + [Name || Scope <- ?SCOPES, Name <- desired_scope_version(Scope)]. + +desired_scope_version(Scope) -> with_upgrade_graph(fun heads/1, Scope). + +write_desired_version() -> write(desired_version()). + +write_desired_scope_version(Scope) -> + write_scope_version(Scope, desired_scope_version(Scope)). + +upgrades_required(Scope) -> + case read_scope_version(Scope) of + {error, enoent} -> + {error, version_not_available}; + {ok, CurrentHeads} -> + with_upgrade_graph( + fun (G) -> + case unknown_heads(CurrentHeads, G) of + [] -> {ok, upgrades_to_apply(CurrentHeads, G)}; + Unknown -> {error, {future_upgrades_found, Unknown}} + end + end, Scope) + end. + +%% ------------------------------------------------------------------- + +with_upgrade_graph(Fun, Scope) -> + case rabbit_misc:build_acyclic_graph( + fun (Module, Steps) -> vertices(Module, Steps, Scope) end, + fun (Module, Steps) -> edges(Module, Steps, Scope) end, + rabbit_misc:all_module_attributes(rabbit_upgrade)) of + {ok, G} -> try + Fun(G) + after + true = digraph:delete(G) + end; + {error, {vertex, duplicate, StepName}} -> + throw({error, {duplicate_upgrade_step, StepName}}); + {error, {edge, {bad_vertex, StepName}, _From, _To}} -> + throw({error, {dependency_on_unknown_upgrade_step, StepName}}); + {error, {edge, {bad_edge, StepNames}, _From, _To}} -> + throw({error, {cycle_in_upgrade_steps, StepNames}}) + end. + +vertices(Module, Steps, Scope0) -> + [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps, + Scope0 == Scope1]. + +edges(_Module, Steps, Scope0) -> + [{Require, StepName} || {StepName, Scope1, Requires} <- Steps, + Require <- Requires, + Scope0 == Scope1]. +unknown_heads(Heads, G) -> + [H || H <- Heads, digraph:vertex(G, H) =:= false]. + +upgrades_to_apply(Heads, G) -> + %% Take all the vertices which can reach the known heads. That's + %% everything we've already applied. Subtract that from all + %% vertices: that's what we have to apply. + Unsorted = sets:to_list( + sets:subtract( + sets:from_list(digraph:vertices(G)), + sets:from_list(digraph_utils:reaching(Heads, G)))), + %% Form a subgraph from that list and find a topological ordering + %% so we can invoke them in order. + [element(2, digraph:vertex(G, StepName)) || + StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))]. + +heads(G) -> + lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]). %% ------------------------------------------------------------------- -matches([], []) -> - true; -matches([{Scope, SV}|VerA], [{Scope, SV}|VerB]) -> - matches(VerA, VerB); -matches([{Scope, SVA}|VerA], [{Scope, SVB}|VerB]) -> - case {lists:usort(SVA), lists:usort(SVB)} of - {SV, SV} -> matches(VerA, VerB); - _ -> false - end; -matches(_VerA, _VerB) -> - false. 
- -categorise_by_scope(Heads) when is_list(Heads) -> +categorise_by_scope(Version) when is_list(Version) -> Categorised = [{Scope, Name} || {_Module, Attributes} <- rabbit_misc:all_module_attributes(rabbit_upgrade), {Name, Scope, _Requires} <- Attributes, - lists:member(Name, Heads)], + lists:member(Name, Version)], orddict:to_list( - lists:foldl(fun ({Scope, Name}, Version) -> - rabbit_misc:orddict_cons(Scope, Name, Version) + lists:foldl(fun ({Scope, Name}, CatVersion) -> + rabbit_misc:orddict_cons(Scope, Name, CatVersion) end, orddict:new(), Categorised)). dir() -> rabbit_mnesia:dir(). -- cgit v1.2.1 From 5a390fde517e6f8539f75199b357d064d8c11541 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:17:27 +0000 Subject: renamings --- src/rabbit_mnesia.erl | 10 +++++----- src/rabbit_upgrade.erl | 8 ++++---- src/rabbit_version.erl | 51 +++++++++++++++++++++++++------------------------- 3 files changed, 34 insertions(+), 35 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4902cfeb..c598fbb9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -442,7 +442,7 @@ init_db(ClusterNodes, Force) -> {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( - rpc:call(AnotherNode, rabbit_version, read, [])), + rpc:call(AnotherNode, rabbit_version, recorded, [])), IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), @@ -457,7 +457,7 @@ init_db(ClusterNodes, Force) -> %% If we're just starting up a new node we won't have %% a version version_not_available -> - ok = rabbit_version:write_desired_version() + ok = rabbit_version:record_desired() end, ensure_schema_integrity() end; @@ -484,14 +484,14 @@ schema_ok_or_move() -> end. ensure_version_ok({ok, DiscVersion}) -> - DesiredVersion = rabbit_version:desired_version(), + DesiredVersion = rabbit_version:desired(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {schema_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> - ok = rabbit_version:write_desired_version(). + ok = rabbit_version:record_desired(). create_schema() -> mnesia:stop(), @@ -501,7 +501,7 @@ create_schema() -> cannot_start_mnesia), ok = create_tables(), ok = ensure_schema_integrity(), - ok = rabbit_version:write_desired_version(). + ok = rabbit_version:record_desired(). move_db() -> mnesia:stop(), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 9347cc53..b4e1191e 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -141,13 +141,13 @@ upgrade_mode(AllNodes) -> end; [Another|_] -> ClusterVersion = - case rpc:call(Another, rabbit_version, desired_scope_version, + case rpc:call(Another, rabbit_version, desired_for_scope, [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; {badrpc, Reason} -> {unknown, Reason}; V -> V end, - MyVersion = rabbit_version:desired_scope_version(mnesia), + MyVersion = rabbit_version:desired_for_scope(mnesia), case rabbit_version:'=~='(ClusterVersion, MyVersion) of true -> %% The other node(s) have upgraded already, I am not the @@ -208,7 +208,7 @@ secondary_upgrade(AllNodes) -> end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = rabbit_mnesia:init_db(ClusterNodes, true), - ok = rabbit_version:write_desired_scope_version(mnesia), + ok = rabbit_version:record_desired_for_scope(mnesia), ok. 
nodes_running(Nodes) -> @@ -253,7 +253,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = rabbit_version:write_desired_scope_version(Scope), + ok = rabbit_version:record_desired_for_scope(Scope), ok = rabbit_misc:recursive_delete([BackupDir]), info("~s upgrades: Mnesia backup removed~n", [Scope]), ok = file:delete(LockFile); diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index 2d7ba8e4..e079df4a 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,14 +16,14 @@ -module(rabbit_version). --export([read/0, '=~='/2, desired_version/0, desired_scope_version/1, - write_desired_version/0, write_desired_scope_version/1, +-export([recorded/0, '=~='/2, desired/0, desired_for_scope/1, + record_desired/0, record_desired_for_scope/1, upgrades_required/1]). %% ------------------------------------------------------------------- -ifdef(use_specs). --export_type([scope/0, step/0, scope_version/0]). +-export_type([scope/0, step/0]). -type(scope() :: atom()). -type(scope_version() :: [atom()]). @@ -31,12 +31,12 @@ -type(version() :: [atom()]). --spec(read/0 :: () -> rabbit_types:ok_or_error2(version(), any())). --spec('=~='/2 :: (version(), version()) -> boolean()). --spec(desired_version/0 :: () -> version()). --spec(desired_scope_version/1 :: (scope()) -> scope_version()). --spec(write_desired_version/0 :: () -> 'ok'). --spec(write_desired_scope_version/1 :: +-spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())). +-spec('=~='/2 :: ([A], [A]) -> boolean()). +-spec(desired/0 :: () -> version()). +-spec(desired_for_scope/1 :: (scope()) -> scope_version()). +-spec(record_desired/0 :: () -> 'ok'). +-spec(record_desired_for_scope/1 :: (scope()) -> rabbit_types:ok_or_error(any())). -spec(upgrades_required/1 :: (scope()) -> rabbit_types:ok_or_error2([step()], any())). @@ -49,15 +49,15 @@ %% ------------------------------------------------------------------- -read() -> case rabbit_misc:read_term_file(schema_filename()) of - {ok, [V]} -> {ok, V}; - {error, _} = Err -> Err - end. +recorded() -> case rabbit_misc:read_term_file(schema_filename()) of + {ok, [V]} -> {ok, V}; + {error, _} = Err -> Err + end. -write(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). +record(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]). -read_scope_version(Scope) -> - case read() of +recorded_for_scope(Scope) -> + case recorded() of {error, _} = Err -> Err; {ok, Version} -> @@ -67,14 +67,14 @@ read_scope_version(Scope) -> end} end. -write_scope_version(Scope, ScopeVersion) -> - case read() of +record_for_scope(Scope, ScopeVersion) -> + case recorded() of {error, _} = Err -> Err; {ok, Version} -> Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version), {Scope, ScopeVersion}), - ok = write([Name || {_Scope, Names} <- Version1, Name <- Names]) + ok = record([Name || {_Scope, Names} <- Version1, Name <- Names]) end. %% ------------------------------------------------------------------- @@ -84,18 +84,17 @@ write_scope_version(Scope, ScopeVersion) -> %% ------------------------------------------------------------------- -desired_version() -> - [Name || Scope <- ?SCOPES, Name <- desired_scope_version(Scope)]. +desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)]. -desired_scope_version(Scope) -> with_upgrade_graph(fun heads/1, Scope). +desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope). 
-write_desired_version() -> write(desired_version()). +record_desired() -> record(desired()). -write_desired_scope_version(Scope) -> - write_scope_version(Scope, desired_scope_version(Scope)). +record_desired_for_scope(Scope) -> + record_for_scope(Scope, desired_for_scope(Scope)). upgrades_required(Scope) -> - case read_scope_version(Scope) of + case recorded_for_scope(Scope) of {error, enoent} -> {error, version_not_available}; {ok, CurrentHeads} -> -- cgit v1.2.1 From 349c24621ca359b5e6deac9d43ed8cefd0616152 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:22:03 +0000 Subject: If we have version_not_available, then it makes sense to have version_mismatch, not schema_mismatch --- src/rabbit_mnesia.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c598fbb9..867da779 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -487,8 +487,7 @@ ensure_version_ok({ok, DiscVersion}) -> DesiredVersion = rabbit_version:desired(), case rabbit_version:'=~='(DesiredVersion, DiscVersion) of true -> ok; - false -> throw({error, {schema_mismatch, - DesiredVersion, DiscVersion}}) + false -> throw({error, {version_mismatch, DesiredVersion, DiscVersion}}) end; ensure_version_ok({error, _}) -> ok = rabbit_version:record_desired(). -- cgit v1.2.1 From e74f420db40c772e77454dd05f32f7c172a2156a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 09:34:13 +0000 Subject: and again, _don't_ record confirm on immediate if we don't deliver... --- src/rabbit_amqqueue_process.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 4ebdb7a3..3f5758ce 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -838,7 +838,10 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> %% queues discarding the message? %% {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), - reply(Delivered, maybe_record_confirm_message(Confirm, State1)); + reply(Delivered, case Delivered of + true -> maybe_record_confirm_message(Confirm, State1); + false -> State1 + end); handle_call({deliver, Delivery}, From, State) -> %% Synchronous, "mandatory" delivery mode. Reply asap. -- cgit v1.2.1 From 753447e36efb88eb1580a93c5331894d93d1621c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 14:41:03 +0000 Subject: Make sure we record if an exchange is actually deleted... --- src/rabbit_binding.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7ddb7814..1a9cbde1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -335,12 +335,13 @@ maybe_auto_delete(XName, Bindings, Deletions) -> [] -> add_deletion(XName, {undefined, not_deleted, Bindings}, Deletions); [X] -> - add_deletion(XName, {X, not_deleted, Bindings}, - case rabbit_exchange:maybe_auto_delete(X) of - not_deleted -> Deletions; - {deleted, Deletions1} -> combine_deletions( - Deletions, Deletions1) - end) + case rabbit_exchange:maybe_auto_delete(X) of + not_deleted -> + add_deletion(XName, {X, not_deleted, Bindings}, Deletions); + {deleted, Deletions1} -> + add_deletion(XName, {X, deleted, Bindings}, + combine_deletions(Deletions, Deletions1)) + end end. 
delete_forward_routes(Route) -> -- cgit v1.2.1 From 8449616cc83d64477ad4fc69921c1e227f7be3a1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 14:53:30 +0000 Subject: Make code prettier --- src/rabbit_binding.erl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1a9cbde1..6167790e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -331,18 +331,18 @@ group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) -> group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed). maybe_auto_delete(XName, Bindings, Deletions) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> - add_deletion(XName, {undefined, not_deleted, Bindings}, Deletions); - [X] -> - case rabbit_exchange:maybe_auto_delete(X) of - not_deleted -> - add_deletion(XName, {X, not_deleted, Bindings}, Deletions); - {deleted, Deletions1} -> - add_deletion(XName, {X, deleted, Bindings}, - combine_deletions(Deletions, Deletions1)) - end - end. + {Entry, Deletions1} = + case mnesia:read({rabbit_exchange, XName}) of + [] -> {{undefined, not_deleted, Bindings}, Deletions}; + [X] -> case rabbit_exchange:maybe_auto_delete(X) of + not_deleted -> + {{X, not_deleted, Bindings}, Deletions}; + {deleted, Deletions2} -> + {{X, deleted, Bindings}, + combine_deletions(Deletions, Deletions2)} + end + end, + add_deletion(XName, Entry, Deletions1). delete_forward_routes(Route) -> ok = mnesia:delete_object(rabbit_route, Route, write), -- cgit v1.2.1 From d350c5ce08dff4f0ff64d5d294c00b0866932121 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:26:25 +0000 Subject: rabbit:stop() is not always called when rabbit is stopping. E.g. q(). doesn't invoke rabbit:stop/0. It does invoke rabbit:stop/1 though. --- src/rabbit.erl | 2 +- src/rabbit_mnesia.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index b1d88a52..5f88b997 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -204,7 +204,6 @@ start() -> end. stop() -> - ok = rabbit_mnesia:record_running_disc_nodes(), ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> @@ -246,6 +245,7 @@ start(normal, []) -> end. stop(_State) -> + ok = rabbit_mnesia:record_running_disc_nodes(), terminated_ok = error_logger:delete_report_handler(rabbit_error_logger), ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 867da779..4d3267a2 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -378,7 +378,7 @@ delete_cluster_nodes_config() -> end. running_nodes_filename() -> - dir() ++ "/nodes_running_at_shutdown". + filename:join(dir(), "nodes_running_at_shutdown"). record_running_disc_nodes() -> FileName = running_nodes_filename(), -- cgit v1.2.1 From 544081a948a2ecc2e114dfb81aaf268cf10d966d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:37:54 +0000 Subject: Improve symmetry: if we write the running_disc_nodes on rabbit shutdown, we should nuke it on rabbit startup. This then means that the prelaunch thingy is always run with the previously_running_disc_nodes file present. 
I believe this makes no semantic changes, but the improved symmetry is worth having --- src/rabbit.erl | 1 + src/rabbit_upgrade.erl | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 5f88b997..1361d0f4 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -233,6 +233,7 @@ rotate_logs(BinarySuffix) -> start(normal, []) -> case erts_version_check() of ok -> + ok = rabbit_mnesia:delete_previously_running_disc_nodes(), {ok, SupPid} = rabbit_sup:start_link(), true = register(rabbit, self()), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b4e1191e..20f53da2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -112,8 +112,7 @@ maybe_upgrade_mnesia() -> primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) end - end, - ok = rabbit_mnesia:delete_previously_running_disc_nodes(). + end. upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of -- cgit v1.2.1 From b13bd327e6d58bfe4fdeb8f8c14f666f493fbe54 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 16:53:15 +0000 Subject: Can't call =~= with non-version args, thus shuffle things around a bit. End up saving a line. --- src/rabbit_upgrade.erl | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 20f53da2..875d971a 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -139,24 +139,23 @@ upgrade_mode(AllNodes) -> []) end; [Another|_] -> - ClusterVersion = - case rpc:call(Another, rabbit_version, desired_for_scope, - [mnesia]) of - {badrpc, {'EXIT', {undef, _}}} -> unknown_old_version; - {badrpc, Reason} -> {unknown, Reason}; - V -> V - end, MyVersion = rabbit_version:desired_for_scope(mnesia), - case rabbit_version:'=~='(ClusterVersion, MyVersion) of - true -> - %% The other node(s) have upgraded already, I am not the - %% upgrader - secondary; - false -> - %% The other node(s) are running an unexpected version. - die("Cluster upgrade needed but other nodes are " - "running ~p~nand I want ~p", - [ClusterVersion, MyVersion]) + ErrFun = fun (ClusterVersion) -> + %% The other node(s) are running an + %% unexpected version. + die("Cluster upgrade needed but other nodes are " + "running ~p~nand I want ~p", + [ClusterVersion, MyVersion]) + end, + case rpc:call(Another, rabbit_version, desired_for_scope, + [mnesia]) of + {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version); + {badrpc, Reason} -> ErrFun({unknown, Reason}); + CV -> case rabbit_version:'=~='( + MyVersion, CV) of + true -> secondary; + false -> ErrFun(CV) + end end end. -- cgit v1.2.1 From 2398d1d0ee6180c9dbd37ba325bbf071fd73d62e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 17 Mar 2011 17:57:38 +0000 Subject: Not 100% beautiful, but this gets serial ids into the hands of add_binding and remove_binding. 
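For orientation, a rough sketch of the callback contract this commit introduces, seen from an exchange type that opts in to serialised events. It is illustrative only and not part of the patch series; the clauses would sit in some hypothetical rabbit_exchange_type implementation, and notify_external_router/3 is a made-up placeholder. With serialise_events returning true, rabbit_binding invokes add_binding/3 and remove_bindings/3 once inside the mnesia transaction with the atom 'transaction', and once after commit with a per-exchange serial number drawn from the new rabbit_exchange_serial table; a type that returns false receives 'none' after commit instead.

serialise_events() -> true.

add_binding(transaction, _X, _B) ->
    %% inside the mnesia transaction: no serial has been allocated yet
    ok;
add_binding(Serial, X, B) when is_integer(Serial) ->
    %% after commit: Serial orders this event relative to other
    %% add_binding/remove_bindings callbacks for the same exchange
    notify_external_router(Serial, X, {added, B}).

remove_bindings(transaction, _X, _Bs) ->
    ok;
remove_bindings(Serial, X, Bs) when is_integer(Serial) ->
    notify_external_router(Serial, X, {removed, Bs}).

notify_external_router(_Serial, _X, _Event) ->
    ok. %% hypothetical placeholder

The built-in direct, fanout, headers and topic types all answer false, which is why the topic exchange in the diff below only needs to distinguish the 'transaction' and 'none' cases.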
--- include/rabbit.hrl | 1 + include/rabbit_exchange_type_spec.hrl | 1 + src/rabbit_amqqueue.erl | 4 +-- src/rabbit_binding.erl | 56 +++++++++++++++++++++++++++-------- src/rabbit_exchange.erl | 19 ++++++------ src/rabbit_exchange_type.erl | 6 ++++ src/rabbit_exchange_type_direct.erl | 4 ++- src/rabbit_exchange_type_fanout.erl | 4 ++- src/rabbit_exchange_type_headers.erl | 4 ++- src/rabbit_exchange_type_topic.erl | 12 ++++---- src/rabbit_misc.erl | 14 ++++----- src/rabbit_mnesia.erl | 4 +++ src/rabbit_upgrade_functions.erl | 6 ++++ 13 files changed, 96 insertions(+), 39 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 9f483c30..99608be4 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -43,6 +43,7 @@ -record(resource, {virtual_host, kind, name}). -record(exchange, {name, type, durable, auto_delete, internal, arguments}). +-record(exchange_serial, {name, serial}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, arguments, pid}). diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 45c475d8..8774b6ce 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -17,6 +17,7 @@ -ifdef(use_specs). -spec(description/0 :: () -> [{atom(), any()}]). +-spec(serialise_events/0 :: () -> boolean()). -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index c7391965..102ea13b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -433,8 +433,8 @@ internal_delete(QueueName) -> case mnesia:wread({rabbit_queue, QueueName}) of [] -> rabbit_misc:const({error, not_found}); [_] -> Deletions = internal_delete1(QueueName), - fun (Tx) -> ok = rabbit_binding:process_deletions( - Deletions, Tx) + fun (Tx) -> rabbit_binding:process_deletions( + Deletions, Tx) end end end). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7ddb7814..9aacfaa4 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -124,12 +124,7 @@ add(Binding, InnerFun) -> case mnesia:read({rabbit_route, B}) of [] -> ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), - rabbit_event:notify_if( - not Tx, binding_created, info(B)) - end; + fun (Tx) -> process_addition(Src, B, Tx) end; [_] -> fun rabbit_misc:const_ok/1 end; {error, _} = Err -> @@ -161,7 +156,7 @@ remove(Binding, InnerFun) -> {error, _} = Err -> rabbit_misc:const(Err); {ok, Deletions} -> - fun (Tx) -> ok = process_deletions(Deletions, Tx) end + fun (Tx) -> process_deletions(Deletions, Tx) end end end). @@ -404,19 +399,54 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> anything_but(not_deleted, Deleted1, Deleted2), [Bindings1 | Bindings2]}. -process_deletions(Deletions, Tx) -> +process_addition(Src, B, State) -> + Serial = serial(Src, State, fun (_, S) -> S end), + Tx = State =:= transaction, + Arg = case Tx of true -> transaction; _ -> Serial end, + ok = rabbit_exchange:callback(Src, add_binding, [Arg, Src, B]), + rabbit_event:notify_if(not Tx, binding_created, info(B)), + case Tx of true -> Serial; false -> ok end. 
+ +process_deletions(Deletions, State) -> + Tx = State =:= transaction, + Next = dict:fold( - fun (_XName, {X, Deleted, Bindings}, ok) -> + fun (_XName, {X, Deleted, Bindings}, Serials) -> FlatBindings = lists:flatten(Bindings), [rabbit_event:notify_if(not Tx, binding_deleted, info(B)) || B <- FlatBindings], case Deleted of not_deleted -> - rabbit_exchange:callback(X, remove_bindings, - [Tx, X, FlatBindings]); + Serial = serial(X, State, fun dict:fetch/2), + Arg = case Tx of true -> transaction; _ -> Serial end, + ok = rabbit_exchange:callback(X, remove_bindings, + [Arg, X, FlatBindings]), + dict:store(X, Serial, Serials); deleted -> rabbit_event:notify_if(not Tx, exchange_deleted, [{name, X#exchange.name}]), - rabbit_exchange:callback(X, delete, [Tx, X, FlatBindings]) + ok = rabbit_exchange:callback(X, delete, + [Tx, X, FlatBindings]), + Serials end - end, ok, Deletions). + end, dict:new(), Deletions), + case Tx of true -> Next; false -> ok end. + +serial(X, State, Fun) -> + case rabbit_exchange:callback(X, serialise_events, []) of + true -> case State of + transaction -> incr_serial(X); + _ -> Fun(X, State) + end; + false -> none + end. + +incr_serial(#exchange{name = Name}) -> + Prev = case mnesia:read(rabbit_exchange_serial, Name, write) of + [] -> 0; + [#exchange_serial{serial = S}] -> S + end, + Serial = Prev + 1, + mnesia:write(rabbit_exchange_serial, + #exchange_serial{name = Name, serial = Serial}, write), + Serial. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a463e570..09648fcf 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -264,12 +264,13 @@ process_route(#resource{kind = queue} = QName, {WorkList, SeenXs, [QName | QNames]}. call_with_exchange(XName, Fun, PrePostCommitFun) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> case mnesia:read({rabbit_exchange, XName}) of - [] -> {error, not_found}; - [X] -> Fun(X) - end - end, PrePostCommitFun). + rabbit_misc:execute_mnesia_tx_with_tail( + fun () -> Result = case mnesia:read({rabbit_exchange, XName}) of + [] -> {error, not_found}; + [X] -> Fun(X) + end, + fun(Tx) -> PrePostCommitFun(Result, Tx) end + end). delete(XName, IfUnused) -> call_with_exchange( @@ -279,9 +280,9 @@ delete(XName, IfUnused) -> false -> fun unconditional_delete/1 end, fun ({deleted, X, Bs, Deletions}, Tx) -> - ok = rabbit_binding:process_deletions( - rabbit_binding:add_deletion( - XName, {X, deleted, Bs}, Deletions), Tx); + rabbit_binding:process_deletions( + rabbit_binding:add_deletion( + XName, {X, deleted, Bs}, Deletions), Tx); (Error = {error, _InUseOrNotFound}, _Tx) -> Error end). diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 547583e9..b34d1aec 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -21,6 +21,12 @@ behaviour_info(callbacks) -> [ {description, 0}, + + %% Should Rabbit ensure that all events delivered to this + %% exchange can be serialised (they might still be delivered out + %% of order, but there'll be a serial number). + {serialise_events, 0}, + {route, 2}, %% called BEFORE declaration, to check args etc; may exit with #amqp_error{} diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 349c2f6e..d1ea62f3 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2]). +-export([description/0, route/2, serialise_events/0]). 
-export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,6 +35,8 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> rabbit_router:match_routing_key(Name, Routes). diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index bc5293c8..9b6e68d8 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,6 +35,8 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index d3529b06..1480afc8 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -41,6 +41,8 @@ description() -> [{name, <<"headers">>}, {description, <<"AMQP headers exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, #delivery{message = #basic_message{content = Content}}) -> Headers = case (Content#content.properties)#'P_basic'.headers of diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..9a9cbc47 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -38,6 +38,8 @@ description() -> [{name, <<"topic">>}, {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + %% NB: This may return duplicate results in some situations (that's ok) route(#exchange{name = X}, #delivery{message = #basic_message{routing_keys = Routes}}) -> @@ -62,12 +64,12 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _Exchange, Binding) -> +add_binding(transaction, _Exchange, Binding) -> internal_add_binding(Binding); -add_binding(false, _Exchange, _Binding) -> +add_binding(none, _Exchange, _Binding) -> ok. -remove_bindings(true, #exchange{name = X}, Bs) -> +remove_bindings(transaction, #exchange{name = X}, Bs) -> %% The remove process is split into two distinct phases. 
In the %% first phase we gather the lists of bindings and edges to %% delete, then in the second phase we process all the @@ -86,7 +88,7 @@ remove_bindings(true, #exchange{name = X}, Bs) -> [trie_remove_edge(X, Parent, Node, W) || {Node, {Parent, W, {0, 0}}} <- gb_trees:to_list(Paths)], ok; -remove_bindings(false, _X, _Bs) -> +remove_bindings(none, _X, _Bs) -> ok. maybe_add_path(_X, [{root, none}], PathAcc) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index e79a58a1..a869a72c 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -409,13 +409,13 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> execute_mnesia_tx_with_tail(TxFun) -> case mnesia:is_transaction() of true -> execute_mnesia_transaction(TxFun); - false -> TailFun = execute_mnesia_transaction( - fun () -> - TailFun1 = TxFun(), - TailFun1(true), - TailFun1 - end), - TailFun(false) + false -> {TailFun, TailRes} = execute_mnesia_transaction( + fun () -> + TailFun1 = TxFun(), + Res1 = TailFun1(transaction), + {TailFun1, Res1} + end), + TailFun(TailRes) end. ensure_ok(ok, _) -> ok; diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 66436920..3d010acf 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -208,6 +208,10 @@ table_definitions() -> [{record_name, exchange}, {attributes, record_info(fields, exchange)}, {match, #exchange{name = exchange_name_match(), _='_'}}]}, + {rabbit_exchange_serial, + [{record_name, exchange_serial}, + {attributes, record_info(fields, exchange_serial)}, + {match, #exchange_serial{name = exchange_name_match(), _='_'}}]}, {rabbit_durable_queue, [{record_name, amqqueue}, {attributes, record_info(fields, amqqueue)}, diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index b9dbe418..8b3b833c 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -26,6 +26,7 @@ -rabbit_upgrade({internal_exchanges, []}). -rabbit_upgrade({user_to_internal_user, [hash_passwords]}). -rabbit_upgrade({topic_trie, []}). +-rabbit_upgrade({exchange_event_serialisation, []}). %% ------------------------------------------------------------------- @@ -37,6 +38,7 @@ -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). -spec(topic_trie/0 :: () -> 'ok'). +-spec(exchange_event_serialisation/0 :: () -> 'ok'). -endif. @@ -101,6 +103,10 @@ topic_trie() -> {attributes, [trie_binding, value]}, {type, ordered_set}]). +exchange_event_serialisation() -> + create(rabbit_exchange_serial, [{record_name, exchange_serial}, + {attributes, [name, serial]}]). + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 0f23637e28f62110bab3bff3715d5fa8f5dc4c17 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 17 Mar 2011 18:02:50 +0000 Subject: cosmetic --- src/rabbit_msg_store.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index a08bbd70..2b162f9d 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1147,7 +1147,7 @@ orddict_store(Key, Val, Dict) -> orddict:store(Key, Val, Dict). 
update_pending_confirms(Fun, CRef, - State = #msstate { clients = Clients, + State = #msstate { clients = Clients, cref_to_msg_ids = CTM }) -> case dict:fetch(CRef, Clients) of {undefined, _CloseFDsFun} -> State; -- cgit v1.2.1 From 3ae4322d27ee90b19d774418c43fd7e8a0b75ac4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 12:15:51 +0000 Subject: Ensure mnesia is stopped for the local upgrade backup. --- src/rabbit_upgrade.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b4e1191e..2c31e602 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -182,7 +182,6 @@ primary_upgrade(Upgrades, Nodes) -> mnesia, Upgrades, fun () -> - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), force_tables(), case Others of [] -> ok; @@ -227,7 +226,8 @@ maybe_upgrade_local() -> {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); {ok, []} -> ok; - {ok, Upgrades} -> apply_upgrades(local, Upgrades, + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, fun () -> ok end) end. @@ -249,6 +249,7 @@ apply_upgrades(Scope, Upgrades, Fun) -> ok = file:delete(lock_filename(BackupDir)), info("~s upgrades: Mnesia dir backed up to ~p~n", [Scope, BackupDir]), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), Fun(), [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", -- cgit v1.2.1 From 82ea108bc5c4f17283f0b0080f7dfcf9baea123d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 13:38:20 +0000 Subject: Take a single backup before any upgrade, remove it when we're all clear. --- src/rabbit_upgrade.erl | 106 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 69 insertions(+), 37 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index c061cd49..d56b50b2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,7 +16,8 @@ -module(rabbit_upgrade). --export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). +-export([maybe_backup/0, maybe_upgrade_mnesia/0, maybe_upgrade_local/0, + maybe_remove_backup/0]). -include("rabbit.hrl"). @@ -27,8 +28,10 @@ -ifdef(use_specs). +-spec(maybe_backup/0 :: () -> 'ok'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). +-spec(maybe_remove_backup/0 :: () -> 'ok'). -endif. @@ -91,11 +94,66 @@ %% ------------------------------------------------------------------- +maybe_backup() -> + case backup_required() of + true -> backup(); + _ -> ok + end. + +backup() -> + rabbit:prepare(), %% Ensure we have logs for this + LockFile = lock_filename(dir()), + case rabbit_misc:lock_file(LockFile) of + ok -> + BackupDir = backup_dir(), + case rabbit_mnesia:copy_db(BackupDir) of + ok -> + %% We need to make the backup after creating the + %% lock file so that it protects us from trying to + %% overwrite the backup. Unfortunately this means + %% the lock file exists in the backup too, which + %% is not intuitive. Remove it. + ok = file:delete(lock_filename(BackupDir)), + info("upgrades: Mnesia dir backed up to ~p~n", [BackupDir]); + {error, E} -> + %% If we can't backup, the upgrade hasn't started + %% hence we don't need the lockfile since the real + %% mnesia dir is the good one. + ok = file:delete(LockFile), + throw({could_not_back_up_mnesia_dir, E}) + end; + {error, eexist} -> + throw({error, previous_upgrade_failed}) + end. 
+ + +maybe_remove_backup() -> + case file:read_file_info(backup_dir()) of + {ok, _} -> remove_backup(); + _ -> ok + end. + +remove_backup() -> + LockFile = lock_filename(dir()), + BackupDir = backup_dir(), + ok = rabbit_misc:recursive_delete([BackupDir]), + info("upgrades: Mnesia backup removed~n", []), + ok = file:delete(LockFile). + +backup_required() -> + case {rabbit_version:upgrades_required(mnesia), + rabbit_version:upgrades_required(local)} of + {{ok, []}, {ok, []}} -> false; + {_, {ok, _}} -> true; + {{ok, _}, _} -> true; + _ -> false + end. + maybe_upgrade_mnesia() -> + maybe_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> - rabbit:prepare(), %% Ensure we have logs for this case AllNodes of [_] -> ok; _ -> die("Cluster upgrade needed but upgrading from " @@ -227,45 +285,18 @@ maybe_upgrade_local() -> {ok, Upgrades} -> mnesia:stop(), apply_upgrades(local, Upgrades, fun () -> ok end) - end. + end, + maybe_remove_backup(). %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> - LockFile = lock_filename(dir()), - case rabbit_misc:lock_file(LockFile) of - ok -> - BackupDir = dir() ++ "-upgrade-backup", - info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), - case rabbit_mnesia:copy_db(BackupDir) of - ok -> - %% We need to make the backup after creating the - %% lock file so that it protects us from trying to - %% overwrite the backup. Unfortunately this means - %% the lock file exists in the backup too, which - %% is not intuitive. Remove it. - ok = file:delete(lock_filename(BackupDir)), - info("~s upgrades: Mnesia dir backed up to ~p~n", - [Scope, BackupDir]), - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - Fun(), - [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], - info("~s upgrades: All upgrades applied successfully~n", - [Scope]), - ok = rabbit_version:record_desired_for_scope(Scope), - ok = rabbit_misc:recursive_delete([BackupDir]), - info("~s upgrades: Mnesia backup removed~n", [Scope]), - ok = file:delete(LockFile); - {error, E} -> - %% If we can't backup, the upgrade hasn't started - %% hence we don't need the lockfile since the real - %% mnesia dir is the good one. - ok = file:delete(LockFile), - throw({could_not_back_up_mnesia_dir, E}) - end; - {error, eexist} -> - throw({error, previous_upgrade_failed}) - end. + info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + Fun(), + [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], + info("~s upgrades: All upgrades applied successfully~n", [Scope]), + ok = rabbit_version:record_desired_for_scope(Scope). apply_upgrade(Scope, {M, F}) -> info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), @@ -276,6 +307,7 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). +backup_dir() -> dir() ++ "-upgrade-backup". %% NB: we cannot use rabbit_log here since it may not have been %% started yet -- cgit v1.2.1 From 9462d45577e96eeee77b46430a84720814fdc147 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 16:15:39 +0000 Subject: I think this is tidier. 
--- src/rabbit_binding.erl | 75 +++++++++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 9aacfaa4..d2767d15 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -124,7 +124,11 @@ add(Binding, InnerFun) -> case mnesia:read({rabbit_route, B}) of [] -> ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), - fun (Tx) -> process_addition(Src, B, Tx) end; + fun (Tx) -> + ok = rabbit_exchange:callback( + Src, add_binding, [Tx, Src, B]), + process_addition(Src, B, Tx) + end; [_] -> fun rabbit_misc:const_ok/1 end; {error, _} = Err -> @@ -399,49 +403,58 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> anything_but(not_deleted, Deleted1, Deleted2), [Bindings1 | Bindings2]}. -process_addition(Src, B, State) -> - Serial = serial(Src, State, fun (_, S) -> S end), - Tx = State =:= transaction, - Arg = case Tx of true -> transaction; _ -> Serial end, - ok = rabbit_exchange:callback(Src, add_binding, [Arg, Src, B]), - rabbit_event:notify_if(not Tx, binding_created, info(B)), - case Tx of true -> Serial; false -> ok end. - -process_deletions(Deletions, State) -> - Tx = State =:= transaction, - Next = +process_addition(Src, _B, transaction) -> + serial(Src); + +process_addition(_Src, B, _Serial) -> + ok = rabbit_event:notify(binding_created, info(B)). + +process_deletions(Deletions, transaction) -> + process_deletions( + fun (X, Bindings, Acc) -> + pd_callback(transaction, remove_bindings, X, Bindings), + dict:store(X, serial(X), Acc) + end, + fun rabbit_misc:const_ok/1, + Deletions, dict:new(), true); + +process_deletions(Deletions, Serials) -> + process_deletions( + fun (X, Bindings, Acc) -> + pd_callback(dict:fetch(X, Serials), remove_bindings, X, Bindings), + Acc + end, + fun (X) -> + rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]) + end, + Deletions, ok, false). + +process_deletions(NotDeletedFun, DeletedFun, Deletions, Acc0, Tx) -> dict:fold( - fun (_XName, {X, Deleted, Bindings}, Serials) -> + fun (_XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), [rabbit_event:notify_if(not Tx, binding_deleted, info(B)) || B <- FlatBindings], case Deleted of not_deleted -> - Serial = serial(X, State, fun dict:fetch/2), - Arg = case Tx of true -> transaction; _ -> Serial end, - ok = rabbit_exchange:callback(X, remove_bindings, - [Arg, X, FlatBindings]), - dict:store(X, Serial, Serials); + NotDeletedFun(X, FlatBindings, Acc); deleted -> - rabbit_event:notify_if(not Tx, exchange_deleted, - [{name, X#exchange.name}]), - ok = rabbit_exchange:callback(X, delete, - [Tx, X, FlatBindings]), - Serials + DeletedFun(X), + pd_callback(Tx, delete, X, Bindings), + Acc end - end, dict:new(), Deletions), - case Tx of true -> Next; false -> ok end. + end, Acc0, Deletions). -serial(X, State, Fun) -> +pd_callback(Arg, CB, X, Bindings) -> + ok = rabbit_exchange:callback(X, CB, [Arg, X, Bindings]). + +serial(X) -> case rabbit_exchange:callback(X, serialise_events, []) of - true -> case State of - transaction -> incr_serial(X); - _ -> Fun(X, State) - end; + true -> next_serial(X); false -> none end. 
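%% Note on the serial/1 helper above: it is only reached from the
%% 'transaction' clauses of process_addition/3 and process_deletions/2,
%% which run inside the enclosing mnesia transaction (the tail funs are
%% invoked with 'transaction' from within the transaction and again
%% with its result after commit), so the read-and-increment of the
%% per-exchange counter in next_serial/1 below commits atomically with
%% the binding change itself. Exchange types whose serialise_events
%% callback returns false never touch rabbit_exchange_serial; their
%% post-commit callbacks simply receive 'none'.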
-incr_serial(#exchange{name = Name}) -> +next_serial(#exchange{name = Name}) -> Prev = case mnesia:read(rabbit_exchange_serial, Name, write) of [] -> 0; [#exchange_serial{serial = S}] -> S -- cgit v1.2.1 From 9c46af9fe5bbd03d96a51a67d0dc576fa8573415 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 16:36:02 +0000 Subject: Rip out msg_store:release --- src/rabbit_msg_store.erl | 11 +---------- src/rabbit_tests.erl | 2 -- src/rabbit_variable_queue.erl | 7 +------ 3 files changed, 2 insertions(+), 18 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 2b162f9d..4ec77006 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -21,7 +21,7 @@ -export([start_link/4, successfully_recovered_state/1, client_init/4, client_terminate/1, client_delete_and_terminate/1, client_ref/1, close_all_indicated/1, - write/3, read/2, contains/2, remove/2, release/2, sync/3]). + write/3, read/2, contains/2, remove/2, sync/3]). -export([sync/1, set_maximum_since_use/2, has_readers/2, combine_files/3, delete_file/2]). %% internal @@ -153,7 +153,6 @@ {rabbit_types:ok(msg()) | 'not_found', client_msstate()}). -spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()). -spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). --spec(release/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok'). -spec(sync/3 :: ([rabbit_types:msg_id()], fun (() -> any()), client_msstate()) -> 'ok'). @@ -457,8 +456,6 @@ contains(MsgId, CState) -> server_call(CState, {contains, MsgId}). remove([], _CState) -> ok; remove(MsgIds, CState = #client_msstate { client_ref = CRef }) -> server_cast(CState, {remove, CRef, MsgIds}). -release([], _CState) -> ok; -release(MsgIds, CState) -> server_cast(CState, {release, MsgIds}). sync(MsgIds, K, CState) -> server_cast(CState, {sync, MsgIds, K}). 
sync(Server) -> @@ -781,12 +778,6 @@ handle_cast({remove, CRef, MsgIds}, State) -> noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(MsgIds), removed, State1))); -handle_cast({release, MsgIds}, State = - #msstate { dedup_cache_ets = DedupCacheEts }) -> - lists:foreach( - fun (MsgId) -> decrement_cache(DedupCacheEts, MsgId) end, MsgIds), - noreply(State); - handle_cast({sync, MsgIds, K}, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ca046c91..ad8e2485 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1813,8 +1813,6 @@ test_msg_store() -> true = msg_store_contains(true, MsgIds2ndHalf, MSCState2), %% read the second half again MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2), - %% release the second half, just for fun (aka code coverage) - ok = rabbit_msg_store:release(MsgIds2ndHalf, MSCState3), %% read the second half again, just for fun (aka code coverage) MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3), ok = rabbit_msg_store:client_terminate(MSCState4), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1b29756b..8c9d62a7 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -725,7 +725,7 @@ requeue(AckTags, MsgPropsFun, State) -> needs_confirming = false } end, a(reduce_memory_use( - ack(fun msg_store_release/3, + ack(fun (_, _, _) -> ok end, fun (#msg_status { msg = Msg, msg_props = MsgProps }, State1) -> {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), true, false, State1), @@ -969,11 +969,6 @@ msg_store_remove(MSCState, IsPersistent, MsgIds) -> MSCState, IsPersistent, fun (MCSState1) -> rabbit_msg_store:remove(MsgIds, MCSState1) end). -msg_store_release(MSCState, IsPersistent, MsgIds) -> - with_immutable_msg_store_state( - MSCState, IsPersistent, - fun (MCSState1) -> rabbit_msg_store:release(MsgIds, MCSState1) end). - msg_store_sync(MSCState, IsPersistent, MsgIds, Fun) -> with_immutable_msg_store_state( MSCState, IsPersistent, -- cgit v1.2.1 From b8f6018e0dc8ee6f5e67ee1d58fafa938185c42d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 16:45:53 +0000 Subject: Rip out dedup cache --- src/rabbit_msg_store.erl | 129 ++++++++++------------------------------------- 1 file changed, 28 insertions(+), 101 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 4ec77006..bc68d2cd 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -67,7 +67,6 @@ gc_pid, %% pid of our GC file_handles_ets, %% tid of the shared file handles table file_summary_ets, %% tid of the file summary table - dedup_cache_ets, %% tid of dedup cache table cur_file_cache_ets, %% tid of current file cache table dying_clients, %% set of dying clients clients, %% map of references of all registered clients @@ -87,7 +86,6 @@ gc_pid, file_handles_ets, file_summary_ets, - dedup_cache_ets, cur_file_cache_ets }). @@ -130,7 +128,6 @@ gc_pid :: pid(), file_handles_ets :: ets:tid(), file_summary_ets :: ets:tid(), - dedup_cache_ets :: ets:tid(), cur_file_cache_ets :: ets:tid()}). 
-type(msg_ref_delta_gen(A) :: fun ((A) -> 'finished' | @@ -395,7 +392,7 @@ successfully_recovered_state(Server) -> client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) -> {IState, IModule, Dir, GCPid, - FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts} = + FileHandlesEts, FileSummaryEts, CurFileCacheEts} = gen_server2:call( Server, {new_client_state, Ref, MsgOnDiskFun, CloseFDsFun}, infinity), #client_msstate { server = Server, @@ -407,7 +404,6 @@ client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) -> gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }. client_terminate(CState = #client_msstate { client_ref = Ref }) -> @@ -428,27 +424,16 @@ write(MsgId, Msg, ok = server_cast(CState, {write, CRef, MsgId}). read(MsgId, - CState = #client_msstate { dedup_cache_ets = DedupCacheEts, - cur_file_cache_ets = CurFileCacheEts }) -> - %% 1. Check the dedup cache - case fetch_and_increment_cache(DedupCacheEts, MsgId) of - not_found -> - %% 2. Check the cur file cache - case ets:lookup(CurFileCacheEts, MsgId) of - [] -> - Defer = fun() -> - {server_call(CState, {read, MsgId}), CState} - end, - case index_lookup_positive_ref_count(MsgId, CState) of - not_found -> Defer(); - MsgLocation -> client_read1(MsgLocation, Defer, CState) - end; - [{MsgId, Msg, _CacheRefCount}] -> - %% Although we've found it, we don't know the - %% refcount, so can't insert into dedup cache - {{ok, Msg}, CState} + CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) -> + %% Check the cur file cache + case ets:lookup(CurFileCacheEts, MsgId) of + [] -> + Defer = fun() -> {server_call(CState, {read, MsgId}), CState} end, + case index_lookup_positive_ref_count(MsgId, CState) of + not_found -> Defer(); + MsgLocation -> client_read1(MsgLocation, Defer, CState) end; - Msg -> + [{MsgId, Msg, _CacheRefCount}] -> {{ok, Msg}, CState} end. @@ -514,7 +499,6 @@ client_read2(false, _Right, client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, CState = #client_msstate { file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, gc_pid = GCPid, client_ref = Ref }) -> Release = @@ -571,8 +555,8 @@ client_read3(#msg_location { msg_id = MsgId, file = File }, Defer, %% Could the msg_store now mark the file to be %% closed? No: marks for closing are issued only %% when the msg_store has locked the file. - {Msg, CState2} = %% This will never be the current file - read_from_disk(MsgLocation, CState1, DedupCacheEts), + %% This will never be the current file + {Msg, CState2} = read_from_disk(MsgLocation, CState1), Release(), %% this MUST NOT fail with badarg {{ok, Msg}, CState2}; #msg_location {} = MsgLocation -> %% different file! @@ -636,7 +620,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> %% CleanShutdown <=> msg location index and file_summary both %% recovered correctly. 
- DedupCacheEts = ets:new(rabbit_msg_store_dedup_cache, [set, public]), FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles, [ordered_set, public]), CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]), @@ -666,7 +649,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) -> gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, dying_clients = sets:new(), clients = Clients, @@ -717,14 +699,12 @@ handle_call({new_client_state, CRef, MsgOnDiskFun, CloseFDsFun}, _From, index_module = IndexModule, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, clients = Clients, gc_pid = GCPid }) -> Clients1 = dict:store(CRef, {MsgOnDiskFun, CloseFDsFun}, Clients), - reply({IndexState, IndexModule, Dir, GCPid, - FileHandlesEts, FileSummaryEts, DedupCacheEts, CurFileCacheEts}, - State #msstate { clients = Clients1 }); + reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts, + CurFileCacheEts}, State #msstate { clients = Clients1 }); handle_call({client_terminate, CRef}, _From, State) -> reply(ok, clear_client(CRef, State)); @@ -831,7 +811,6 @@ terminate(_Reason, State = #msstate { index_state = IndexState, gc_pid = GCPid, file_handles_ets = FileHandlesEts, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts, clients = Clients, dir = Dir }) -> @@ -846,8 +825,7 @@ terminate(_Reason, State = #msstate { index_state = IndexState, end, State3 = close_all_handles(State1), store_file_summary(FileSummaryEts, Dir), - [ets:delete(T) || - T <- [FileSummaryEts, DedupCacheEts, FileHandlesEts, CurFileCacheEts]], + [ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts, CurFileCacheEts]], IndexModule:terminate(IndexState), store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, {index_module, IndexModule}], Dir), @@ -966,26 +944,18 @@ write_message(MsgId, Msg, sum_valid_data = SumValid + TotalSize, sum_file_size = SumFileSize + TotalSize }). -read_message(MsgId, From, - State = #msstate { dedup_cache_ets = DedupCacheEts }) -> +read_message(MsgId, From, State) -> case index_lookup_positive_ref_count(MsgId, State) of - not_found -> - gen_server2:reply(From, not_found), - State; - MsgLocation -> - case fetch_and_increment_cache(DedupCacheEts, MsgId) of - not_found -> read_message1(From, MsgLocation, State); - Msg -> gen_server2:reply(From, {ok, Msg}), - State - end + not_found -> gen_server2:reply(From, not_found), + State; + MsgLocation -> read_message1(From, MsgLocation, State) end. 
-read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, - file = File, offset = Offset } = MsgLoc, +read_message1(From, #msg_location { msg_id = MsgId, file = File, + offset = Offset } = MsgLoc, State = #msstate { current_file = CurFile, current_file_handle = CurHdl, file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts, cur_file_cache_ets = CurFileCacheEts }) -> case File =:= CurFile of true -> {Msg, State1} = @@ -998,10 +968,8 @@ read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, true -> file_handle_cache:flush(CurHdl); false -> ok end, - read_from_disk(MsgLoc, State, DedupCacheEts); + read_from_disk(MsgLoc, State); [{MsgId, Msg1, _CacheRefCount}] -> - ok = maybe_insert_into_cache( - DedupCacheEts, RefCount, MsgId, Msg1), {Msg1, State} end, gen_server2:reply(From, {ok, Msg}), @@ -1011,17 +979,14 @@ read_message1(From, #msg_location { msg_id = MsgId, ref_count = RefCount, case Locked of true -> add_to_pending_gc_completion({read, MsgId, From}, File, State); - false -> {Msg, State1} = - read_from_disk(MsgLoc, State, DedupCacheEts), + false -> {Msg, State1} = read_from_disk(MsgLoc, State), gen_server2:reply(From, {ok, Msg}), State1 end end. -read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, - file = File, offset = Offset, - total_size = TotalSize }, - State, DedupCacheEts) -> +read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset, + total_size = TotalSize }, State) -> {Hdl, State1} = get_read_handle(File, State), {ok, Offset} = file_handle_cache:position(Hdl, Offset), {ok, {MsgId, Msg}} = @@ -1037,7 +1002,6 @@ read_from_disk(#msg_location { msg_id = MsgId, ref_count = RefCount, {proc_dict, get()} ]}} end, - ok = maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg), {Msg, State1}. contains_message(MsgId, From, @@ -1056,8 +1020,7 @@ contains_message(MsgId, From, end. remove_message(MsgId, CRef, - State = #msstate { file_summary_ets = FileSummaryEts, - dedup_cache_ets = DedupCacheEts }) -> + State = #msstate { file_summary_ets = FileSummaryEts }) -> case should_mask_action(CRef, MsgId, State) of {true, _Location} -> State; @@ -1078,8 +1041,7 @@ remove_message(MsgId, CRef, %% don't remove from CUR_FILE_CACHE_ETS_NAME here %% because there may be further writes in the mailbox %% for the same msg. - 1 -> ok = remove_cache_entry(DedupCacheEts, MsgId), - case ets:lookup(FileSummaryEts, File) of + 1 -> case ets:lookup(FileSummaryEts, File) of [#file_summary { locked = true }] -> add_to_pending_gc_completion( {remove, MsgId, CRef}, File, State); @@ -1089,8 +1051,7 @@ remove_message(MsgId, CRef, File, adjust_valid_total_size(File, -TotalSize, State)) end; - _ -> ok = decrement_cache(DedupCacheEts, MsgId), - ok = Dec(), + _ -> ok = Dec(), State end end. @@ -1313,12 +1274,6 @@ list_sorted_file_names(Dir, Ext) -> %% message cache helper functions %%---------------------------------------------------------------------------- -maybe_insert_into_cache(DedupCacheEts, RefCount, MsgId, Msg) - when RefCount > 1 -> - update_msg_cache(DedupCacheEts, MsgId, Msg); -maybe_insert_into_cache(_DedupCacheEts, _RefCount, _MsgId, _Msg) -> - ok. - update_msg_cache(CacheEts, MsgId, Msg) -> case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of true -> ok; @@ -1327,34 +1282,6 @@ update_msg_cache(CacheEts, MsgId, Msg) -> fun () -> update_msg_cache(CacheEts, MsgId, Msg) end) end. -remove_cache_entry(DedupCacheEts, MsgId) -> - true = ets:delete(DedupCacheEts, MsgId), - ok. 
- -fetch_and_increment_cache(DedupCacheEts, MsgId) -> - case ets:lookup(DedupCacheEts, MsgId) of - [] -> - not_found; - [{_MsgId, Msg, _RefCount}] -> - safe_ets_update_counter_ok( - DedupCacheEts, MsgId, {3, +1}, - %% someone has deleted us in the meantime, insert us - fun () -> ok = update_msg_cache(DedupCacheEts, MsgId, Msg) end), - Msg - end. - -decrement_cache(DedupCacheEts, MsgId) -> - true = safe_ets_update_counter( - DedupCacheEts, MsgId, {3, -1}, - fun (N) when N =< 0 -> true = ets:delete(DedupCacheEts, MsgId); - (_N) -> true - end, - %% MsgId is not in there because although it's been - %% delivered, it's never actually been read (think: - %% persistent message held in RAM) - fun () -> true end), - ok. - %%---------------------------------------------------------------------------- %% index %%---------------------------------------------------------------------------- -- cgit v1.2.1 From 5b5e7d0b3324a126c8b45aa33dc8d28e6046ae43 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 18 Mar 2011 16:50:44 +0000 Subject: Probably makes sense to remove the serial number on delete. --- src/rabbit_exchange.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 09648fcf..35612153 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -307,5 +307,6 @@ conditional_delete(X = #exchange{name = XName}) -> unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_durable_exchange, XName}), ok = mnesia:delete({rabbit_exchange, XName}), + ok = mnesia:delete({rabbit_exchange_serial, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. -- cgit v1.2.1 From 5f295dc115d1d93428377051530f79ca26064c20 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 18 Mar 2011 17:57:04 +0000 Subject: Well I thought =~= was beautiful and appropriately approximate to == --- src/rabbit_mnesia.erl | 2 +- src/rabbit_upgrade.erl | 2 +- src/rabbit_version.erl | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4d3267a2..869f09a1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -485,7 +485,7 @@ schema_ok_or_move() -> ensure_version_ok({ok, DiscVersion}) -> DesiredVersion = rabbit_version:desired(), - case rabbit_version:'=~='(DesiredVersion, DiscVersion) of + case rabbit_version:matches(DesiredVersion, DiscVersion) of true -> ok; false -> throw({error, {version_mismatch, DesiredVersion, DiscVersion}}) end; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d56b50b2..866f20ee 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -209,7 +209,7 @@ upgrade_mode(AllNodes) -> [mnesia]) of {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version); {badrpc, Reason} -> ErrFun({unknown, Reason}); - CV -> case rabbit_version:'=~='( + CV -> case rabbit_version:matches( MyVersion, CV) of true -> secondary; false -> ErrFun(CV) diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl index e079df4a..400abc10 100644 --- a/src/rabbit_version.erl +++ b/src/rabbit_version.erl @@ -16,7 +16,7 @@ -module(rabbit_version). --export([recorded/0, '=~='/2, desired/0, desired_for_scope/1, +-export([recorded/0, matches/2, desired/0, desired_for_scope/1, record_desired/0, record_desired_for_scope/1, upgrades_required/1]). @@ -32,7 +32,7 @@ -type(version() :: [atom()]). -spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())). 
--spec('=~='/2 :: ([A], [A]) -> boolean()). +-spec(matches/2 :: ([A], [A]) -> boolean()). -spec(desired/0 :: () -> version()). -spec(desired_for_scope/1 :: (scope()) -> scope_version()). -spec(record_desired/0 :: () -> 'ok'). @@ -79,7 +79,7 @@ record_for_scope(Scope, ScopeVersion) -> %% ------------------------------------------------------------------- -'=~='(VerA, VerB) -> +matches(VerA, VerB) -> lists:usort(VerA) =:= lists:usort(VerB). %% ------------------------------------------------------------------- -- cgit v1.2.1 From 05eb5fa6fff022148051b28c16744b82be982589 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 12:27:02 +0000 Subject: serialise_events should take an exchange really. --- include/rabbit_exchange_type_spec.hrl | 2 +- src/rabbit_binding.erl | 2 +- src/rabbit_exchange_type.erl | 6 +++--- src/rabbit_exchange_type_direct.erl | 5 ++--- src/rabbit_exchange_type_fanout.erl | 5 ++--- src/rabbit_exchange_type_headers.erl | 5 ++--- src/rabbit_exchange_type_topic.erl | 5 ++--- 7 files changed, 13 insertions(+), 17 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 8774b6ce..54687dc9 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -17,7 +17,7 @@ -ifdef(use_specs). -spec(description/0 :: () -> [{atom(), any()}]). --spec(serialise_events/0 :: () -> boolean()). +-spec(serialise_events/1 :: (rabbit_types:exchange()) -> boolean()). -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index d2767d15..d363e342 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -449,7 +449,7 @@ pd_callback(Arg, CB, X, Bindings) -> ok = rabbit_exchange:callback(X, CB, [Arg, X, Bindings]). serial(X) -> - case rabbit_exchange:callback(X, serialise_events, []) of + case rabbit_exchange:callback(X, serialise_events, [X]) of true -> next_serial(X); false -> none end. diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index b34d1aec..670551de 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -22,10 +22,10 @@ behaviour_info(callbacks) -> [ {description, 0}, - %% Should Rabbit ensure that all events delivered to this - %% exchange can be serialised (they might still be delivered out + %% Should Rabbit ensure that all events delivered to an individual exchange + %% this can be serialised? (they might still be delivered out %% of order, but there'll be a serial number). - {serialise_events, 0}, + {serialise_events, 1}, {route, 2}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index d1ea62f3..bc7a76e3 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, route/2, serialise_events/1]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,12 +35,11 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. -serialise_events() -> false. - route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> rabbit_router:match_routing_key(Name, Routes). 
+serialise_events(_X) -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 9b6e68d8..2e70fb24 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, route/2, serialise_events/1]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,11 +35,10 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. -serialise_events() -> false. - route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). +serialise_events(_X) -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 1480afc8..1e8b0687 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, route/2, serialise_events/1]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -41,8 +41,6 @@ description() -> [{name, <<"headers">>}, {description, <<"AMQP headers exchange, as per the AMQP specification">>}]. -serialise_events() -> false. - route(#exchange{name = Name}, #delivery{message = #basic_message{content = Content}}) -> Headers = case (Content#content.properties)#'P_basic'.headers of @@ -114,6 +112,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], end, headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). +serialise_events(_X) -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 9a9cbc47..2f77b838 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, route/2, serialise_events/1]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -38,8 +38,6 @@ description() -> [{name, <<"topic">>}, {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. -serialise_events() -> false. - %% NB: This may return duplicate results in some situations (that's ok) route(#exchange{name = X}, #delivery{message = #basic_message{routing_keys = Routes}}) -> @@ -48,6 +46,7 @@ route(#exchange{name = X}, mnesia:async_dirty(fun trie_match/2, [X, Words]) end || RKey <- Routes]). +serialise_events(_X) -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. -- cgit v1.2.1 From 50f18ad821cf3e68d9fa9c67eaa2f72106b4aa84 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:00:31 +0000 Subject: Various QA tidyups, and stop exporting the backup / remove backup functions. 
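One detail worth spelling out: the reworked maybe_remove_backup/0 below switches
to filelib:is_dir/1, which returns a plain boolean(), whereas the
file:read_file_info/1 call it replaces returns a tagged tuple -- so the
surviving {ok, _} clause can no longer match (a later commit in this series
tidies that up). Purely as an illustration, not part of the patch, the two
checks compare like this:

    -module(backup_dir_sketch).
    -export([present_bool/1, present_tagged/1]).

    -include_lib("kernel/include/file.hrl").

    %% filelib:is_dir/1 returns a plain boolean() ...
    present_bool(Dir) ->
        filelib:is_dir(Dir).

    %% ... whereas file:read_file_info/1 returns {ok, #file_info{}} or
    %% {error, Reason}, which is what an {ok, _} clause expects to see.
    present_tagged(Dir) ->
        case file:read_file_info(Dir) of
            {ok, #file_info{type = directory}} -> true;
            _                                  -> false
        end.
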
--- src/rabbit_upgrade.erl | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index d56b50b2..09530f38 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -16,8 +16,7 @@ -module(rabbit_upgrade). --export([maybe_backup/0, maybe_upgrade_mnesia/0, maybe_upgrade_local/0, - maybe_remove_backup/0]). +-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]). -include("rabbit.hrl"). @@ -28,10 +27,8 @@ -ifdef(use_specs). --spec(maybe_backup/0 :: () -> 'ok'). -spec(maybe_upgrade_mnesia/0 :: () -> 'ok'). -spec(maybe_upgrade_local/0 :: () -> 'ok' | 'version_not_available'). --spec(maybe_remove_backup/0 :: () -> 'ok'). -endif. @@ -94,13 +91,13 @@ %% ------------------------------------------------------------------- -maybe_backup() -> +maybe_take_backup() -> case backup_required() of - true -> backup(); + true -> take_backup(); _ -> ok end. -backup() -> +take_backup() -> rabbit:prepare(), %% Ensure we have logs for this LockFile = lock_filename(dir()), case rabbit_misc:lock_file(LockFile) of @@ -128,17 +125,15 @@ backup() -> maybe_remove_backup() -> - case file:read_file_info(backup_dir()) of + case filelib:is_dir(backup_dir()) of {ok, _} -> remove_backup(); _ -> ok end. remove_backup() -> - LockFile = lock_filename(dir()), - BackupDir = backup_dir(), - ok = rabbit_misc:recursive_delete([BackupDir]), + ok = rabbit_misc:recursive_delete([backup_dir()]), info("upgrades: Mnesia backup removed~n", []), - ok = file:delete(LockFile). + ok = file:delete(lock_filename(dir())). backup_required() -> case {rabbit_version:upgrades_required(mnesia), @@ -150,7 +145,7 @@ backup_required() -> end. maybe_upgrade_mnesia() -> - maybe_backup(), + maybe_take_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> @@ -278,15 +273,16 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - case rabbit_version:upgrades_required(local) of - {error, version_not_available} -> version_not_available; - {error, _} = Err -> throw(Err); - {ok, []} -> ok; - {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end) - end, - maybe_remove_backup(). + Res = case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> ok; + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, + fun () -> ok end) + end, + maybe_remove_backup(), + Res. %% ------------------------------------------------------------------- -- cgit v1.2.1 From b38be006e69e96cdd2e81929b874cd43bad0b9f0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:22:51 +0000 Subject: maybe_remove_backup is safe when returning version_not_available since we would not have taken a backup in the first place. However, this is not exactly obvious, so let's not do that. 
--- src/rabbit_upgrade.erl | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index b9c7b8dc..73c9ee2b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -273,16 +273,15 @@ node_running(Node) -> %% ------------------------------------------------------------------- maybe_upgrade_local() -> - Res = case rabbit_version:upgrades_required(local) of - {error, version_not_available} -> version_not_available; - {error, _} = Err -> throw(Err); - {ok, []} -> ok; - {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end) - end, - maybe_remove_backup(), - Res. + case rabbit_version:upgrades_required(local) of + {error, version_not_available} -> version_not_available; + {error, _} = Err -> throw(Err); + {ok, []} -> maybe_remove_backup(); + {ok, Upgrades} -> mnesia:stop(), + apply_upgrades(local, Upgrades, + fun () -> ok end), + maybe_remove_backup() + end. %% ------------------------------------------------------------------- -- cgit v1.2.1 From 7f13bc65ab2ea9a1c712990781a80f225c2188e9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 14:28:34 +0000 Subject: Oops. --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 73c9ee2b..e84e1f7b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -126,8 +126,8 @@ take_backup() -> maybe_remove_backup() -> case filelib:is_dir(backup_dir()) of - {ok, _} -> remove_backup(); - _ -> ok + true -> ok = remove_backup(); + _ -> ok end. remove_backup() -> -- cgit v1.2.1 From 18c265d38bd490cd421ce05b29551e65b5b82747 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:42:36 +0000 Subject: Don't try to determine whether a backup is needed before doing anything, take it as needed. This inverts the backup and the lock file - the backup now comes first and the lock file is only used to defend apply_upgrades/3. --- src/rabbit_upgrade.erl | 58 ++++++++++++++++---------------------------------- 1 file changed, 18 insertions(+), 40 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index e84e1f7b..0a7e4a37 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -91,39 +91,24 @@ %% ------------------------------------------------------------------- -maybe_take_backup() -> - case backup_required() of - true -> take_backup(); - _ -> ok +ensure_backup() -> + case filelib:is_file(lock_filename()) of + false -> case filelib:is_dir(backup_dir()) of + false -> ok = take_backup(); + _ -> ok + end; + true -> throw({error, previous_upgrade_failed}) end. take_backup() -> rabbit:prepare(), %% Ensure we have logs for this - LockFile = lock_filename(dir()), - case rabbit_misc:lock_file(LockFile) of - ok -> - BackupDir = backup_dir(), - case rabbit_mnesia:copy_db(BackupDir) of - ok -> - %% We need to make the backup after creating the - %% lock file so that it protects us from trying to - %% overwrite the backup. Unfortunately this means - %% the lock file exists in the backup too, which - %% is not intuitive. Remove it. - ok = file:delete(lock_filename(BackupDir)), - info("upgrades: Mnesia dir backed up to ~p~n", [BackupDir]); - {error, E} -> - %% If we can't backup, the upgrade hasn't started - %% hence we don't need the lockfile since the real - %% mnesia dir is the good one. 
- ok = file:delete(LockFile), - throw({could_not_back_up_mnesia_dir, E}) - end; - {error, eexist} -> - throw({error, previous_upgrade_failed}) + BackupDir = backup_dir(), + case rabbit_mnesia:copy_db(BackupDir) of + ok -> info("upgrades: Mnesia dir backed up to ~p~n", + [BackupDir]); + {error, E} -> throw({could_not_back_up_mnesia_dir, E}) end. - maybe_remove_backup() -> case filelib:is_dir(backup_dir()) of true -> ok = remove_backup(); @@ -132,20 +117,9 @@ maybe_remove_backup() -> remove_backup() -> ok = rabbit_misc:recursive_delete([backup_dir()]), - info("upgrades: Mnesia backup removed~n", []), - ok = file:delete(lock_filename(dir())). - -backup_required() -> - case {rabbit_version:upgrades_required(mnesia), - rabbit_version:upgrades_required(local)} of - {{ok, []}, {ok, []}} -> false; - {_, {ok, _}} -> true; - {{ok, _}, _} -> true; - _ -> false - end. + info("upgrades: Mnesia backup removed~n", []). maybe_upgrade_mnesia() -> - maybe_take_backup(), AllNodes = rabbit_mnesia:all_clustered_nodes(), case rabbit_version:upgrades_required(mnesia) of {error, version_not_available} -> @@ -286,12 +260,15 @@ maybe_upgrade_local() -> %% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> + ensure_backup(), + ok = rabbit_misc:lock_file(lock_filename()), info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), Fun(), [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades], info("~s upgrades: All upgrades applied successfully~n", [Scope]), - ok = rabbit_version:record_desired_for_scope(Scope). + ok = rabbit_version:record_desired_for_scope(Scope), + ok = file:delete(lock_filename()). apply_upgrade(Scope, {M, F}) -> info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]), @@ -301,6 +278,7 @@ apply_upgrade(Scope, {M, F}) -> dir() -> rabbit_mnesia:dir(). +lock_filename() -> lock_filename(dir()). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). backup_dir() -> dir() ++ "-upgrade-backup". -- cgit v1.2.1 From e90061b37554d9acc9601ccdc64fb80cf5141901 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:51:34 +0000 Subject: When upgrading a secondary node we call init_db twice: once early to force a cluster rejoin (at which point we are not ready to do local upgrades, e.g. fhc is not running) and then once at the regular time. Deal with that. --- src/rabbit_mnesia.erl | 29 +++++++++++++++++------------ src/rabbit_upgrade.erl | 2 +- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 869f09a1..c1f8a22f 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -18,7 +18,7 @@ -module(rabbit_mnesia). -export([ensure_mnesia_dir/0, dir/0, status/0, init/0, is_db_empty/0, - cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/2, + cluster/1, force_cluster/1, reset/0, force_reset/0, init_db/3, is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, @@ -45,7 +45,7 @@ -spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). --spec(init_db/2 :: ([node()], boolean()) -> 'ok'). +-spec(init_db/3 :: ([node()], boolean(), boolean()) -> 'ok'). -spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). 
@@ -90,7 +90,7 @@ status() -> init() -> ok = ensure_mnesia_running(), ok = ensure_mnesia_dir(), - ok = init_db(read_cluster_nodes_config(), true), + ok = init_db(read_cluster_nodes_config(), true, true), ok. is_db_empty() -> @@ -112,7 +112,7 @@ cluster(ClusterNodes, Force) -> ok = ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force), + ok = init_db(ClusterNodes, Force, true), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() @@ -413,7 +413,7 @@ delete_previously_running_disc_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. -init_db(ClusterNodes, Force) -> +init_db(ClusterNodes, Force, DoLocalUpgrades) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -451,13 +451,18 @@ init_db(ClusterNodes, Force) -> true -> disc; false -> ram end), - case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new node we won't have - %% a version - version_not_available -> - ok = rabbit_version:record_desired() + case DoLocalUpgrades of + true -> + case rabbit_upgrade:maybe_upgrade_local() of + ok -> + ok; + %% If we're just starting up a new + %% node we won't have a version + version_not_available -> + ok = rabbit_version:record_desired() + end; + false -> + ok end, ensure_schema_integrity() end; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 0a7e4a37..6959208b 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -231,7 +231,7 @@ secondary_upgrade(AllNodes) -> false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = rabbit_mnesia:init_db(ClusterNodes, true), + ok = rabbit_mnesia:init_db(ClusterNodes, true, false), ok = rabbit_version:record_desired_for_scope(mnesia), ok. -- cgit v1.2.1 From 49025c80d9eb23f59615f6a92522d48aee5bbd3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 21 Mar 2011 16:54:09 +0000 Subject: Better name, vertical space. --- src/rabbit_mnesia.erl | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index c1f8a22f..47df1148 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -413,7 +413,7 @@ delete_previously_running_disc_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. 
-init_db(ClusterNodes, Force, DoLocalUpgrades) -> +init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -451,18 +451,16 @@ init_db(ClusterNodes, Force, DoLocalUpgrades) -> true -> disc; false -> ram end), - case DoLocalUpgrades of - true -> - case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new - %% node we won't have a version - version_not_available -> - ok = rabbit_version:record_desired() - end; - false -> - ok + case DoSecondaryLocalUpgrades of + true -> case rabbit_upgrade:maybe_upgrade_local() of + ok -> + ok; + %% If we're just starting up a new + %% node we won't have a version + version_not_available -> + ok = rabbit_version:record_desired() + end; + false -> ok end, ensure_schema_integrity() end; -- cgit v1.2.1 From f1d46d7b616b8cb325ff4f6e7f02569fc0e9f5f7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Mar 2011 17:56:34 +0000 Subject: Add test --- src/gm_speed_test.erl | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 src/gm_speed_test.erl diff --git a/src/gm_speed_test.erl b/src/gm_speed_test.erl new file mode 100644 index 00000000..defb0f29 --- /dev/null +++ b/src/gm_speed_test.erl @@ -0,0 +1,82 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(gm_speed_test). + +-export([test/3]). +-export([joined/2, members_changed/3, handle_msg/3, terminate/2]). +-export([wile_e_coyote/2]). + +-behaviour(gm). + +-include("gm_specs.hrl"). + +%% callbacks + +joined(Owner, _Members) -> + Owner ! joined, + ok. + +members_changed(_Owner, _Births, _Deaths) -> + ok. + +handle_msg(Owner, _From, ping) -> + Owner ! ping, + ok. + +terminate(Owner, _Reason) -> + Owner ! terminated, + ok. + +%% other + +wile_e_coyote(Time, WriteUnit) -> + {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self()), + receive joined -> ok end, + timer:sleep(1000), %% wait for all to join + timer:send_after(Time, stop), + Start = now(), + {Sent, Received} = loop(Pid, WriteUnit, 0, 0), + End = now(), + ok = gm:leave(Pid), + receive terminated -> ok end, + Elapsed = timer:now_diff(End, Start) / 1000000, + io:format("Sending rate: ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n", + [Sent/Elapsed, Received/Elapsed]), + ok. + +loop(Pid, WriteUnit, Sent, Received) -> + case read(Received) of + {stop, Received1} -> {Sent, Received1}; + {ok, Received1} -> ok = write(Pid, WriteUnit), + loop(Pid, WriteUnit, Sent + WriteUnit, Received1) + end. + +read(Count) -> + receive + ping -> read(Count + 1); + stop -> {stop, Count} + after 5 -> + {ok, Count} + end. + +write(_Pid, 0) -> ok; +write(Pid, N) -> ok = gm:broadcast(Pid, ping), + write(Pid, N - 1). 
+ +test(Time, WriteUnit, Nodes) -> + ok = gm:create_tables(), + [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes]. -- cgit v1.2.1 From 0cca73f99636dd92c176a8caa54014651f58e25f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 21 Mar 2011 17:57:54 +0000 Subject: Introduce batching (again - same diff as 5f7d8d07f94f) --- src/gm.erl | 134 ++++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 92 insertions(+), 42 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 8cf22581..5b3623cf 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -376,15 +376,16 @@ confirmed_broadcast/2, group_members/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_info/2]). + code_change/3, prioritise_cast/2, prioritise_info/2]). -export([behaviour_info/1]). --export([table_definitions/0]). +-export([table_definitions/0, flush/1]). -define(GROUP_TABLE, gm_group). -define(HIBERNATE_AFTER_MIN, 1000). -define(DESIRED_HIBERNATE, 10000). +-define(BROADCAST_TIMER, 25). -define(SETS, ordsets). -define(DICT, orddict). @@ -398,7 +399,9 @@ pub_count, members_state, callback_args, - confirms + confirms, + broadcast_buffer, + broadcast_timer }). -record(gm_group, { name, version, members }). @@ -508,21 +511,26 @@ confirmed_broadcast(Server, Msg) -> group_members(Server) -> gen_server2:call(Server, group_members, infinity). +flush(Server) -> + gen_server2:cast(Server, flush). + init([GroupName, Module, Args]) -> random:seed(now()), gen_server2:cast(self(), join), Self = self(), - {ok, #state { self = Self, - left = {Self, undefined}, - right = {Self, undefined}, - group_name = GroupName, - module = Module, - view = undefined, - pub_count = 0, - members_state = undefined, - callback_args = Args, - confirms = queue:new() }, hibernate, + {ok, #state { self = Self, + left = {Self, undefined}, + right = {Self, undefined}, + group_name = GroupName, + module = Module, + view = undefined, + pub_count = 0, + members_state = undefined, + callback_args = Args, + confirms = queue:new(), + broadcast_buffer = [], + broadcast_timer = undefined }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -620,7 +628,11 @@ handle_cast(join, State = #state { self = Self, {Module:joined(Args, all_known_members(View)), State1}); handle_cast(leave, State) -> - {stop, normal, State}. + {stop, normal, State}; + +handle_cast(flush, State) -> + noreply( + flush_broadcast_buffer(State #state { broadcast_timer = undefined })). handle_info({'DOWN', MRef, process, _Pid, _Reason}, @@ -662,14 +674,17 @@ handle_info({'DOWN', MRef, process, _Pid, _Reason}, end. -terminate(Reason, #state { module = Module, - callback_args = Args }) -> +terminate(Reason, State = #state { module = Module, + callback_args = Args }) -> + flush_broadcast_buffer(State), Module:terminate(Args, Reason). code_change(_OldVsn, State, _Extra) -> {ok, State}. +prioritise_cast(flush, _State) -> 1; +prioritise_cast(_ , _State) -> 0. prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; prioritise_info(_ , _State) -> 0. @@ -782,33 +797,62 @@ handle_msg({activity, _NotLeft, _Activity}, State) -> noreply(State) -> - {noreply, State, hibernate}. + {noreply, ensure_broadcast_timer(State), hibernate}. reply(Reply, State) -> - {reply, Reply, State, hibernate}. 
- -internal_broadcast(Msg, From, State = #state { self = Self, - pub_count = PubCount, - members_state = MembersState, - module = Module, - confirms = Confirms, - callback_args = Args }) -> - PubMsg = {PubCount, Msg}, - Activity = activity_cons(Self, [PubMsg], [], activity_nil()), - ok = maybe_send_activity(activity_finalise(Activity), State), - MembersState1 = - with_member( - fun (Member = #member { pending_ack = PA }) -> - Member #member { pending_ack = queue:in(PubMsg, PA) } - end, Self, MembersState), + {reply, Reply, ensure_broadcast_timer(State), hibernate}. + +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = undefined }) -> + State; +ensure_broadcast_timer(State = #state { broadcast_buffer = [], + broadcast_timer = TRef }) -> + timer:cancel(TRef), + State #state { broadcast_timer = undefined }; +ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> + {ok, TRef} = timer:apply_after(?BROADCAST_TIMER, ?MODULE, flush, [self()]), + State #state { broadcast_timer = TRef }; +ensure_broadcast_timer(State) -> + State. + +internal_broadcast(Msg, From, State = #state { self = Self, + pub_count = PubCount, + module = Module, + confirms = Confirms, + callback_args = Args, + broadcast_buffer = Buffer }) -> + Result = Module:handle_msg(Args, Self, Msg), + Buffer1 = [{PubCount, Msg} | Buffer], Confirms1 = case From of none -> Confirms; _ -> queue:in({PubCount, From}, Confirms) end, - handle_callback_result({Module:handle_msg(Args, Self, Msg), - State #state { pub_count = PubCount + 1, - members_state = MembersState1, - confirms = Confirms1 }}). + State1 = State #state { pub_count = PubCount + 1, + confirms = Confirms1, + broadcast_buffer = Buffer1 }, + case From =/= none of + true -> + handle_callback_result({Result, flush_broadcast_buffer(State1)}); + false -> + handle_callback_result( + {Result, State1 #state { broadcast_buffer = Buffer1 }}) + end. + +flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) -> + State; +flush_broadcast_buffer(State = #state { self = Self, + members_state = MembersState, + broadcast_buffer = Buffer }) -> + Pubs = lists:reverse(Buffer), + Activity = activity_cons(Self, Pubs, [], activity_nil()), + ok = maybe_send_activity(activity_finalise(Activity), State), + MembersState1 = with_member( + fun (Member = #member { pending_ack = PA }) -> + PA1 = queue:join(PA, queue:from_list(Pubs)), + Member #member { pending_ack = PA1 } + end, Self, MembersState), + State #state { members_state = MembersState1, + broadcast_buffer = [] }. %% --------------------------------------------------------------------------- @@ -1093,16 +1137,22 @@ maybe_monitor(Self, Self) -> maybe_monitor(Other, _Self) -> erlang:monitor(process, Other). -check_neighbours(State = #state { self = Self, - left = Left, - right = Right, - view = View }) -> +check_neighbours(State = #state { self = Self, + left = Left, + right = Right, + view = View, + broadcast_buffer = Buffer }) -> #view_member { left = VLeft, right = VRight } = fetch_view_member(Self, View), Ver = view_version(View), Left1 = ensure_neighbour(Ver, Self, Left, VLeft), Right1 = ensure_neighbour(Ver, Self, Right, VRight), - State1 = State #state { left = Left1, right = Right1 }, + Buffer1 = case Right1 of + {Self, undefined} -> []; + _ -> Buffer + end, + State1 = State #state { left = Left1, right = Right1, + broadcast_buffer = Buffer1 }, ok = maybe_send_catchup(Right, State1), State1. 
-- cgit v1.2.1 From 2aeb64f3ce2bf0f0dec90e23b61578ead79781df Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 12:39:04 +0000 Subject: clarify documentation (thanks Emile) --- src/rabbit_variable_queue.erl | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 1b29756b..14c36b12 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -150,10 +150,13 @@ %% responsive. %% %% In the queue we keep track of both messages that are pending -%% delivery and messages that are pending acks. This ensures that -%% purging (deleting the former) and deletion (deleting the former and -%% the latter) are both cheap and do require any scanning through qi -%% segments. +%% delivery and messages that are pending acks. In the event of a +%% queue purge, we only need to load qi segments if the queue has +%% elements in deltas (i.e. it came under significant memory +%% pressure). In the event of a queue deletion, in addition to the +%% preceding, by keeping track of pending acks in RAM, we do not need +%% to search through qi segments looking for messages that are yet to +%% be acknowledged. %% %% Pending acks are recorded in memory either as the tuple {SeqId, %% MsgId, MsgProps} (tuple-form) or as the message itself (message- -- cgit v1.2.1 From 22a202104ae661bdda9ed87977d9f03e1df6f240 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 22 Mar 2011 12:39:48 +0000 Subject: Switch to erlang-nox. --- packaging/debs/Debian/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control index b01d38b3..45f5c5c4 100644 --- a/packaging/debs/Debian/debian/control +++ b/packaging/debs/Debian/debian/control @@ -7,7 +7,7 @@ Standards-Version: 3.8.0 Package: rabbitmq-server Architecture: all -Depends: erlang (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} +Depends: erlang-nox (>= 1:12.b.3), adduser, logrotate, ${misc:Depends} Description: An AMQP server written in Erlang RabbitMQ is an implementation of AMQP, the emerging standard for high performance enterprise messaging. The RabbitMQ server is a robust and -- cgit v1.2.1 From eeb1e5597036ba4464221aa934be00310df5668c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 15:02:33 +0000 Subject: Enforce a bunch of returns --- src/rabbit_msg_store.erl | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 2b162f9d..bb26de64 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -850,16 +850,16 @@ terminate(_Reason, State = #msstate { index_state = IndexState, State1 = case CurHdl of undefined -> State; _ -> State2 = internal_sync(State), - file_handle_cache:close(CurHdl), + ok = file_handle_cache:close(CurHdl), State2 end, State3 = close_all_handles(State1), - store_file_summary(FileSummaryEts, Dir), - [ets:delete(T) || + ok = store_file_summary(FileSummaryEts, Dir), + [true = ets:delete(T) || T <- [FileSummaryEts, DedupCacheEts, FileHandlesEts, CurFileCacheEts]], IndexModule:terminate(IndexState), - store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, - {index_module, IndexModule}], Dir), + ok = store_recovery_terms([{client_refs, dict:fetch_keys(Clients)}, + {index_module, IndexModule}], Dir), State3 #msstate { index_state = undefined, current_file_handle = undefined }. 
@@ -912,13 +912,16 @@ internal_sync(State = #msstate { current_file_handle = CurHdl, false -> [{CRef, MsgIds} | NS] end end, [], CTM), - case {Syncs, CGs} of - {[], []} -> ok; - _ -> file_handle_cache:sync(CurHdl) - end, + ok = case {Syncs, CGs} of + {[], []} -> ok; + _ -> file_handle_cache:sync(CurHdl) + end, [K() || K <- lists:reverse(Syncs)], - [client_confirm(CRef, MsgIds, written, State1) || {CRef, MsgIds} <- CGs], - State1 #msstate { cref_to_msg_ids = dict:new(), on_sync = [] }. + State2 = lists:foldl( + fun ({CRef, MsgIds}, StateN) -> + client_confirm(CRef, MsgIds, written, StateN) + end, State1, CGs), + State2 #msstate { on_sync = [] }. write_action({true, not_found}, _MsgId, State) -> {ignore, undefined, State}; @@ -1466,7 +1469,7 @@ recover_file_summary(false, _Dir) -> recover_file_summary(true, Dir) -> Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME), case ets:file2tab(Path) of - {ok, Tid} -> file:delete(Path), + {ok, Tid} -> ok = file:delete(Path), {true, Tid}; {error, _Error} -> recover_file_summary(false, Dir) end. @@ -1533,9 +1536,7 @@ scan_file_for_valid_messages(Dir, FileName) -> Hdl, filelib:file_size( form_filename(Dir, FileName)), fun scan_fun/2, []), - %% if something really bad has happened, - %% the close could fail, but ignore - file_handle_cache:close(Hdl), + ok = file_handle_cache:close(Hdl), Valid; {error, enoent} -> {ok, [], 0}; {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}} @@ -1971,32 +1972,33 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), - file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + ok = file:delete(filename:join(Dir, ?CLEAN_FILENAME)), recover_crashed_compactions(BaseDir), ok. foreach_file(D, Fun, Files) -> - [Fun(filename:join(D, File)) || File <- Files]. + [ok = Fun(filename:join(D, File)) || File <- Files]. foreach_file(D1, D2, Fun, Files) -> - [Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. + [ok = Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files]. transform_dir(BaseDir, Store, TransformFun) -> Dir = filename:join(BaseDir, atom_to_list(Store)), TmpDir = filename:join(Dir, ?TRANSFORM_TMP), TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end, + CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end, case filelib:is_dir(TmpDir) of true -> throw({error, transform_failed_previously}); false -> FileList = list_sorted_file_names(Dir, ?FILE_EXTENSION), foreach_file(Dir, TmpDir, TransformFile, FileList), foreach_file(Dir, fun file:delete/1, FileList), - foreach_file(TmpDir, Dir, fun file:copy/2, FileList), + foreach_file(TmpDir, Dir, CopyFile, FileList), foreach_file(TmpDir, fun file:delete/1, FileList), ok = file:del_dir(TmpDir) end. transform_msg_file(FileOld, FileNew, TransformFun) -> - rabbit_misc:ensure_parent_dirs_exist(FileNew), + ok = rabbit_misc:ensure_parent_dirs_exist(FileNew), {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []), {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write], [{write_buffer, @@ -2009,6 +2011,6 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), - file_handle_cache:close(RefOld), - file_handle_cache:close(RefNew), + ok = file_handle_cache:close(RefOld), + ok = file_handle_cache:close(RefNew), ok. 
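
An aside on the wrapper introduced above: foreach_file/3 and foreach_file/4
assert ok = Fun(...) on every file, so functions whose success value is not the
atom ok -- file:copy/2 returns {ok, BytesCopied} -- get a small adapter such as
CopyFile. A self-contained sketch of that shape (illustration only, not rabbit
code):

    -module(ok_wrapper_sketch).
    -export([copy_all/1]).

    %% file:copy/2 returns {ok, BytesCopied} | {error, Reason}; wrapping it
    %% lets every call site keep the "ok =" assertion style used above.
    copy_all(SrcDstPairs) ->
        CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end,
        [ok = CopyFile(Src, Dst) || {Src, Dst} <- SrcDstPairs],
        ok.
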
-- cgit v1.2.1 From 5fd7264796fbe35dbc7562b1cfc7ef09c4a3f3fb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 22 Mar 2011 17:44:39 +0000 Subject: Renaming bits and pieces for consistency and checking a few more return values, plus other minor fixes --- src/rabbit_mnesia.erl | 1 + src/rabbit_upgrade.erl | 39 ++++++++++++++++++++------------------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47df1148..75e6eeed 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -528,6 +528,7 @@ move_db() -> ok. copy_db(Destination) -> + ok = ensure_mnesia_not_running(), rabbit_misc:recursive_copy(dir(), Destination). create_tables() -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 6959208b..39a42ef2 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -91,7 +91,7 @@ %% ------------------------------------------------------------------- -ensure_backup() -> +ensure_backup_taken() -> case filelib:is_file(lock_filename()) of false -> case filelib:is_dir(backup_dir()) of false -> ok = take_backup(); @@ -109,7 +109,7 @@ take_backup() -> {error, E} -> throw({could_not_back_up_mnesia_dir, E}) end. -maybe_remove_backup() -> +ensure_backup_removed() -> case filelib:is_dir(backup_dir()) of true -> ok = remove_backup(); _ -> ok @@ -135,6 +135,7 @@ maybe_upgrade_mnesia() -> ok; {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this + ok = ensure_backup_taken(), case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) @@ -203,18 +204,18 @@ die(Msg, Args) -> primary_upgrade(Upgrades, Nodes) -> Others = Nodes -- [node()], - apply_upgrades( - mnesia, - Upgrades, - fun () -> - force_tables(), - case Others of - [] -> ok; - _ -> info("mnesia upgrades: Breaking cluster~n", []), - [{atomic, ok} = mnesia:del_table_copy(schema, Node) - || Node <- Others] - end - end), + ok = apply_upgrades( + mnesia, + Upgrades, + fun () -> + force_tables(), + case Others of + [] -> ok; + _ -> info("mnesia upgrades: Breaking cluster~n", []), + [{atomic, ok} = mnesia:del_table_copy(schema, Node) + || Node <- Others] + end + end), ok. force_tables() -> @@ -250,17 +251,17 @@ maybe_upgrade_local() -> case rabbit_version:upgrades_required(local) of {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); - {ok, []} -> maybe_remove_backup(); + {ok, []} -> ok = ensure_backup_removed(); {ok, Upgrades} -> mnesia:stop(), - apply_upgrades(local, Upgrades, - fun () -> ok end), - maybe_remove_backup() + ok = ensure_backup_taken(), + ok = apply_upgrades(local, Upgrades, + fun () -> ok end), + ok = ensure_backup_removed() end. 
%% ------------------------------------------------------------------- apply_upgrades(Scope, Upgrades, Fun) -> - ensure_backup(), ok = rabbit_misc:lock_file(lock_filename()), info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), -- cgit v1.2.1 From 020f72fbafe0a7d62ced75093a01e2d5239ae7ab Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 22 Mar 2011 17:52:19 +0000 Subject: cosmetic(ish): no need to match the return of ensure_* --- src/rabbit_mnesia.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 66436920..963d814e 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -78,8 +78,8 @@ status() -> {running_nodes, running_clustered_nodes()}]. init() -> - ok = ensure_mnesia_running(), - ok = ensure_mnesia_dir(), + ensure_mnesia_running(), + ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true), ok. @@ -98,8 +98,8 @@ force_cluster(ClusterNodes) -> %% node. If Force is false, only connections to online nodes are %% allowed. cluster(ClusterNodes, Force) -> - ok = ensure_mnesia_not_running(), - ok = ensure_mnesia_dir(), + ensure_mnesia_not_running(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try ok = init_db(ClusterNodes, Force), @@ -455,7 +455,7 @@ create_schema() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = create_tables(), - ok = ensure_schema_integrity(), + ensure_schema_integrity(), ok = rabbit_upgrade:write_version(). move_db() -> @@ -476,7 +476,7 @@ move_db() -> {error, Reason} -> throw({error, {cannot_backup_mnesia, MnesiaDir, BackupDir, Reason}}) end, - ok = ensure_mnesia_dir(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok. @@ -561,12 +561,12 @@ wait_for_tables(TableNames) -> end. 
reset(Force) -> - ok = ensure_mnesia_not_running(), + ensure_mnesia_not_running(), Node = node(), case Force of true -> ok; false -> - ok = ensure_mnesia_dir(), + ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), {Nodes, RunningNodes} = try -- cgit v1.2.1 From 60f50338ac0d19486a77ada8e3f7987a47449f25 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 23 Mar 2011 10:10:31 +0000 Subject: 2.4.0 changelog entries for debian and fedora --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index ae9b2059..45af770a 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -120,6 +120,9 @@ done rm -rf %{buildroot} %changelog +* Tue Mar 22 2011 Alexandru Scvortov 2.4.0-1 +- New Upstream Release + * Thu Feb 3 2011 simon@rabbitmq.com 2.3.1-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index 12165dc0..2ca5074f 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (2.4.0-1) lucid; urgency=low + + * New Upstream Release + + -- Alexandru Scvortov Tue, 22 Mar 2011 17:34:31 +0000 + rabbitmq-server (2.3.1-1) lucid; urgency=low * New Upstream Release -- cgit v1.2.1 -- cgit v1.2.1 From 8b16025be7faf2a5a4d4e403d2150a97e03994be Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 11:53:59 +0000 Subject: New decree is that you're not meant to match against ensure_stuff calls --- src/rabbit_mnesia.erl | 8 +++++--- src/rabbit_upgrade.erl | 10 ++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index ff1b8c97..6ba9e60a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -437,8 +437,9 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> %% We're the first node up case rabbit_upgrade:maybe_upgrade_local() of ok -> ensure_schema_integrity(); - version_not_available -> schema_ok_or_move() - end; + version_not_available -> ok = schema_ok_or_move() + end, + ok; {[AnotherNode|_], _} -> %% Subsequent node in cluster, catch up ensure_version_ok( @@ -462,7 +463,8 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> end; false -> ok end, - ensure_schema_integrity() + ensure_schema_integrity(), + ok end; {error, Reason} -> %% one reason we may end up here is if we try to join diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 39a42ef2..87a22363 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -135,7 +135,7 @@ maybe_upgrade_mnesia() -> ok; {ok, Upgrades} -> rabbit:prepare(), %% Ensure we have logs for this - ok = ensure_backup_taken(), + ensure_backup_taken(), case upgrade_mode(AllNodes) of primary -> primary_upgrade(Upgrades, AllNodes); secondary -> secondary_upgrade(AllNodes) @@ -251,12 +251,14 @@ maybe_upgrade_local() -> case rabbit_version:upgrades_required(local) of {error, version_not_available} -> version_not_available; {error, _} = Err -> throw(Err); - {ok, []} -> ok = ensure_backup_removed(); + {ok, []} -> ensure_backup_removed(), + ok; {ok, Upgrades} -> mnesia:stop(), - ok = ensure_backup_taken(), + ensure_backup_taken(), ok = apply_upgrades(local, Upgrades, fun () -> ok end), - ok = ensure_backup_removed() + ensure_backup_removed(), + ok end. 
%% ------------------------------------------------------------------- -- cgit v1.2.1 From 2a4f51d39b3f291a7cd7e8e9f084cee8386a8712 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 12:11:01 +0000 Subject: sort out how often and when we do the rabbit:prepare (set up log handlers), and actually make that do the mnesia upgrade. --- src/rabbit.erl | 3 ++- src/rabbit_prelaunch.erl | 4 +--- src/rabbit_upgrade.erl | 10 ++++------ 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1361d0f4..c7d0d905 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -192,7 +192,8 @@ %%---------------------------------------------------------------------------- prepare() -> - ok = ensure_working_log_handlers(). + ok = ensure_working_log_handlers(), + ok = rabbit_upgrade:maybe_upgrade_mnesia(). start() -> try diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 92ad6a24..8800e8d6 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -235,10 +235,8 @@ post_process_script(ScriptFile) -> {error, {failed_to_load_script, Reason}} end. -process_entry(Entry = {apply,{application,start_boot,[rabbit,permanent]}}) -> - [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry = {apply,{application,start_boot,[mnesia,permanent]}}) -> - [{apply,{rabbit_upgrade,maybe_upgrade_mnesia,[]}}, Entry]; + [{apply,{rabbit,prepare,[]}}, Entry]; process_entry(Entry) -> [Entry]. diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 87a22363..f2d38a93 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -101,7 +101,6 @@ ensure_backup_taken() -> end. take_backup() -> - rabbit:prepare(), %% Ensure we have logs for this BackupDir = backup_dir(), case rabbit_mnesia:copy_db(BackupDir) of ok -> info("upgrades: Mnesia dir backed up to ~p~n", @@ -134,12 +133,11 @@ maybe_upgrade_mnesia() -> {ok, []} -> ok; {ok, Upgrades} -> - rabbit:prepare(), %% Ensure we have logs for this ensure_backup_taken(), - case upgrade_mode(AllNodes) of - primary -> primary_upgrade(Upgrades, AllNodes); - secondary -> secondary_upgrade(AllNodes) - end + ok = case upgrade_mode(AllNodes) of + primary -> primary_upgrade(Upgrades, AllNodes); + secondary -> secondary_upgrade(AllNodes) + end end. upgrade_mode(AllNodes) -> -- cgit v1.2.1 From fe150f5c5607dbda654074553000af2310d0a7e2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 14:11:09 +0000 Subject: Do an impersonation of the serialisation thing for create / delete. Since the counter resets after delete this doesn't solve the problem but it makes the API cleaner and maybe future-proof. 
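
For orientation: the create and delete callbacks now receive the same serial()
convention as add_binding/remove_bindings -- 'transaction' when invoked inside
the mnesia transaction, and 'none' (or an integer serial, for types that ask
for serialisation) afterwards. A skeletal illustration of the callback side,
not rabbit code, just the shape the built-in types follow in the diff below:

    -module(serial_callback_sketch).
    -export([create/2, delete/3]).

    %% 'transaction' = inside the mnesia transaction;
    %% 'none' or an integer = the post-transaction invocation.
    create(transaction, _X) -> ok;
    create(_Serial,     _X) -> ok.

    delete(transaction, _X, _Bs) -> ok;
    delete(_Serial,     _X, _Bs) -> ok.
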
--- src/rabbit_binding.erl | 15 +++++++++------ src/rabbit_exchange.erl | 10 +++++++++- src/rabbit_exchange_type_topic.erl | 4 ++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index d363e342..33525e07 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -415,7 +415,10 @@ process_deletions(Deletions, transaction) -> pd_callback(transaction, remove_bindings, X, Bindings), dict:store(X, serial(X), Acc) end, - fun rabbit_misc:const_ok/1, + fun (X, Bindings, Acc) -> + pd_callback(transaction, delete, X, Bindings), + dict:store(X, serial(X), Acc) + end, Deletions, dict:new(), true); process_deletions(Deletions, Serials) -> @@ -424,8 +427,10 @@ process_deletions(Deletions, Serials) -> pd_callback(dict:fetch(X, Serials), remove_bindings, X, Bindings), Acc end, - fun (X) -> - rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]) + fun (X, Bindings, Acc) -> + pd_callback(dict:fetch(X, Serials), delete, X, Bindings), + rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]), + Acc end, Deletions, ok, false). @@ -439,9 +444,7 @@ process_deletions(NotDeletedFun, DeletedFun, Deletions, Acc0, Tx) -> not_deleted -> NotDeletedFun(X, FlatBindings, Acc); deleted -> - DeletedFun(X), - pd_callback(Tx, delete, X, Bindings), - Acc + DeletedFun(X, FlatBindings, Acc) end end, Acc0, Deletions). diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 35612153..622eb9f1 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -126,7 +126,15 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - callback(Exchange, create, [Tx, Exchange]), + S = case Tx of + true -> transaction; + false -> case callback(Exchange, serialise_events, + [Exchange]) of + true -> 0; + false -> none + end + end, + callback(Exchange, create, [S, Exchange]), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2f77b838..e3fd9283 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -56,11 +56,11 @@ recover(_Exchange, Bs) -> lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) end). -delete(true, #exchange{name = X}, _Bs) -> +delete(transaction, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), trie_remove_all_bindings(X), ok; -delete(false, _Exchange, _Bs) -> +delete(none, _Exchange, _Bs) -> ok. add_binding(transaction, _Exchange, Binding) -> -- cgit v1.2.1 From 02a4098c915add7c5f9b9002cf5ff0d6783e091d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 14:40:57 +0000 Subject: Detect discnodeishness prior to suffering disclessness --- src/rabbit_upgrade.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index f2d38a93..85f6e88c 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -220,12 +220,14 @@ force_tables() -> [mnesia:force_load_table(T) || T <- rabbit_mnesia:table_names()]. secondary_upgrade(AllNodes) -> + %% must do this before we wipe out schema + IsDiscNode = is_disc_node(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), %% Note that we cluster with all nodes, rather than all disc nodes %% (as we can't know all disc nodes at this point). This is safe as %% we're not writing the cluster config, just setting up Mnesia. 
- ClusterNodes = case is_disc_node() of + ClusterNodes = case IsDiscNode of true -> AllNodes; false -> AllNodes -- [node()] end, -- cgit v1.2.1 From 92da7ca505f469be777aea3ea0321e15c0f99f28 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 14:43:30 +0000 Subject: Correct types. --- include/rabbit_exchange_type_spec.hrl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 54687dc9..ae33e07a 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -16,19 +16,21 @@ -ifdef(use_specs). +-type(serial() :: pos_integer() | 'transaction' | 'none'). + -spec(description/0 :: () -> [{atom(), any()}]). -spec(serialise_events/1 :: (rabbit_types:exchange()) -> boolean()). -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). +-spec(create/2 :: (serial(), rabbit_types:exchange()) -> 'ok'). -spec(recover/2 :: (rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(delete/3 :: (boolean(), rabbit_types:exchange(), +-spec(delete/3 :: (serial(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), +-spec(add_binding/3 :: (serial(), rabbit_types:exchange(), rabbit_types:binding()) -> 'ok'). --spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), +-spec(remove_bindings/3 :: (serial(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(assert_args_equivalence/2 :: (rabbit_types:exchange(), rabbit_framing:amqp_table()) -- cgit v1.2.1 From 2737c3b1801055190278bff534ebfa590aba8ff9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 14:44:11 +0000 Subject: Since we use callback() to call serialise_events() this can now return a boolean(). It's probably nicer to add the type here than add another function. --- src/rabbit_exchange.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 622eb9f1..504cf935 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -72,7 +72,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). +-spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> + boolean() | 'ok'). -endif. -- cgit v1.2.1 From 88824c14d55b1e35bc776655c8bb564c0aaee57d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 15:26:49 +0000 Subject: Only return the queue the second time round and thus don't call XT:add_binding(#amqqueue{}, ...). 
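
That is, the fun built by internal_declare/2 is applied twice -- once with
Tx = transaction and once again afterwards -- and only that second application
should hand back the #amqqueue{} record; the transactional run should propagate
the result of the binding continuation instead. A minimal sketch of the shape,
with placeholder names (illustration only):

    -module(declare_k_sketch).
    -export([make_k/2]).

    %% B is the binding continuation, Q the queue record: propagate B's
    %% result inside the transaction, return the queue on the second run.
    make_k(B, Q) ->
        fun (Tx) ->
                R = B(Tx),
                case Tx of
                    transaction -> R;
                    _           -> Q
                end
        end.
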
--- src/rabbit_amqqueue.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 102ea13b..80dcb79a 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -214,7 +214,13 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> case mnesia:read({rabbit_durable_queue, QueueName}) of [] -> ok = store_queue(Q), B = add_default_binding(Q), - fun (Tx) -> B(Tx), Q end; + fun (Tx) -> + R = B(Tx), + case Tx of + transaction -> R; + _ -> Q + end + end; %% Q exists on stopped node [_] -> rabbit_misc:const(not_found) end; -- cgit v1.2.1 From 2cb7c4257df8c2ae2407779a4e4ca8b09b6b9782 Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Wed, 23 Mar 2011 15:34:23 +0000 Subject: Switched to now_ms() --- src/rabbit_error_logger.erl | 3 ++- src/rabbit_misc.erl | 8 +------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 33dfcef9..5f53e430 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -70,6 +70,7 @@ publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, - timestamp = rabbit_misc:timestamp()}, + timestamp = + rabbit_misc:now_ms() div 1000}, list_to_binary(io_lib:format(Format, Data))), ok. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 713498c8..e79a58a1 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -52,7 +52,7 @@ unlink_and_capture_exit/1]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0, timestamp/0]). +-export([now_ms/0]). -export([lock_file/1]). -export([const_ok/1, const/1]). -export([ntoa/1, ntoab/1]). @@ -190,7 +190,6 @@ {bad_edge, [digraph:vertex()]}), digraph:vertex(), digraph:vertex()})). -spec(now_ms/0 :: () -> non_neg_integer()). --spec(timestamp/0 ::() -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). -spec(const_ok/1 :: (any()) -> 'ok'). -spec(const/1 :: (A) -> const(A)). @@ -200,7 +199,6 @@ -endif. --define(EPOCH, {{1970, 1, 1}, {0, 0, 0}}). %%---------------------------------------------------------------------------- method_record_type(Record) -> @@ -793,10 +791,6 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. -timestamp() -> - calendar:datetime_to_gregorian_seconds(erlang:universaltime()) - - calendar:datetime_to_gregorian_seconds(?EPOCH). - module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> -- cgit v1.2.1 From c63dcaa034093cd1dc217c06c102127d18ac524f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 16:05:04 +0000 Subject: Record all nodes, don't list them when we refuse to start. --- src/rabbit.erl | 4 ++-- src/rabbit_mnesia.erl | 21 +++++++++------------ src/rabbit_upgrade.erl | 15 ++++++--------- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 1361d0f4..e60886fa 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -233,7 +233,7 @@ rotate_logs(BinarySuffix) -> start(normal, []) -> case erts_version_check() of ok -> - ok = rabbit_mnesia:delete_previously_running_disc_nodes(), + ok = rabbit_mnesia:delete_previously_running_nodes(), {ok, SupPid} = rabbit_sup:start_link(), true = register(rabbit, self()), @@ -246,7 +246,7 @@ start(normal, []) -> end. 
stop(_State) -> - ok = rabbit_mnesia:record_running_disc_nodes(), + ok = rabbit_mnesia:record_running_nodes(), terminated_ok = error_logger:delete_report_handler(rabbit_error_logger), ok = rabbit_alarm:stop(), ok = case rabbit_mnesia:is_clustered() of diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 47df1148..e661e5e3 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -22,8 +22,8 @@ is_clustered/0, running_clustered_nodes/0, all_clustered_nodes/0, empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, - record_running_disc_nodes/0, read_previously_running_disc_nodes/0, - delete_previously_running_disc_nodes/0, running_nodes_filename/0]). + record_running_nodes/0, read_previously_running_nodes/0, + delete_previously_running_nodes/0, running_nodes_filename/0]). -export([table_names/0]). @@ -61,9 +61,9 @@ -spec(wait_for_tables/1 :: ([atom()]) -> 'ok'). -spec(create_cluster_nodes_config/1 :: ([node()]) -> 'ok'). -spec(read_cluster_nodes_config/0 :: () -> [node()]). --spec(record_running_disc_nodes/0 :: () -> 'ok'). --spec(read_previously_running_disc_nodes/0 :: () -> [node()]). --spec(delete_previously_running_disc_nodes/0 :: () -> 'ok'). +-spec(record_running_nodes/0 :: () -> 'ok'). +-spec(read_previously_running_nodes/0 :: () -> [node()]). +-spec(delete_previously_running_nodes/0 :: () -> 'ok'). -spec(running_nodes_filename/0 :: () -> file:filename()). -endif. @@ -380,18 +380,15 @@ delete_cluster_nodes_config() -> running_nodes_filename() -> filename:join(dir(), "nodes_running_at_shutdown"). -record_running_disc_nodes() -> +record_running_nodes() -> FileName = running_nodes_filename(), - Nodes = sets:to_list( - sets:intersection( - sets:from_list(nodes_of_type(disc_copies)), - sets:from_list(running_clustered_nodes()))) -- [node()], + Nodes = running_clustered_nodes() -- [node()], %% Don't check the result: we're shutting down anyway and this is %% a best-effort-basis. rabbit_misc:write_term_file(FileName, [Nodes]), ok. -read_previously_running_disc_nodes() -> +read_previously_running_nodes() -> FileName = running_nodes_filename(), case rabbit_misc:read_term_file(FileName) of {ok, [Nodes]} -> Nodes; @@ -400,7 +397,7 @@ read_previously_running_disc_nodes() -> FileName, Reason}}) end. -delete_previously_running_disc_nodes() -> +delete_previously_running_nodes() -> FileName = running_nodes_filename(), case file:delete(FileName) of ok -> ok; diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 6959208b..244be522 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -144,7 +144,7 @@ maybe_upgrade_mnesia() -> upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> - AfterUs = rabbit_mnesia:read_previously_running_disc_nodes(), + AfterUs = rabbit_mnesia:read_previously_running_nodes(), case {is_disc_node(), AfterUs} of {true, []} -> primary; @@ -152,14 +152,11 @@ upgrade_mode(AllNodes) -> Filename = rabbit_mnesia:running_nodes_filename(), die("Cluster upgrade needed but other disc nodes shut " "down after this one.~nPlease first start the last " - "disc node to shut down.~nThe disc nodes that were " - "still running when this one shut down are:~n~n" - " ~p~n~nNote: if several disc nodes were shut down " - "simultaneously they may all~nshow this message. " - "In which case, remove the lock file on one of them " - "and~nstart that node. 
The lock file on this node " - "is:~n~n ~s ", - [AfterUs, Filename]); + "disc node to shut down.~n~nNote: if several disc " + "nodes were shut down simultaneously they may " + "all~nshow this message. In which case, remove " + "the lock file on one of them and~nstart that node. " + "The lock file on this node is:~n~n ~s ", [Filename]); {false, _} -> die("Cluster upgrade needed but this is a ram node.~n" "Please first start the last disc node to shut down.", -- cgit v1.2.1 From 4bb07e06818a4986507685eda2dff36ab56687c5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 23 Mar 2011 16:16:23 +0000 Subject: Explain --- src/rabbit_error_logger.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 5f53e430..4b13033e 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -70,6 +70,9 @@ publish1(RoutingKey, Format, Data, LogExch) -> {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, + %% NB: 0-9-1 says it's a "64 bit POSIX + %% timestamp". That's second + %% resolution, not millisecond. timestamp = rabbit_misc:now_ms() div 1000}, list_to_binary(io_lib:format(Format, Data))), -- cgit v1.2.1 From 77400eaae417d65c9a2556d9281a44a2d521342c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 23 Mar 2011 16:26:33 +0000 Subject: cosmetic --- src/rabbit_error_logger.erl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 4b13033e..3fb0817a 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -67,13 +67,12 @@ publish(_Other, _Format, _Data, _State) -> ok. publish1(RoutingKey, Format, Data, LogExch) -> + %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's + %% second resolution, not millisecond. + Timestamp = rabbit_misc:now_ms() div 1000, {ok, _RoutingRes, _DeliveredQPids} = rabbit_basic:publish(LogExch, RoutingKey, false, false, none, #'P_basic'{content_type = <<"text/plain">>, - %% NB: 0-9-1 says it's a "64 bit POSIX - %% timestamp". That's second - %% resolution, not millisecond. - timestamp = - rabbit_misc:now_ms() div 1000}, + timestamp = Timestamp}, list_to_binary(io_lib:format(Format, Data))), ok. -- cgit v1.2.1 From 3b89e0573c46e82557dc2592514907e2a6d0ae71 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 16:50:28 +0000 Subject: ARGH! Trailing line --- src/rabbit_misc.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index e79a58a1..2e9563cf 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -871,4 +871,3 @@ is_process_alive(Pid) -> true -> true; _ -> false end. - -- cgit v1.2.1 From 21ac2b8a105560ab59b62c42d9ce6ad05ea9f34d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 17:08:19 +0000 Subject: Abstract out continuation --- src/rabbit_mnesia.erl | 29 ++++++++++++++--------------- src/rabbit_upgrade.erl | 2 +- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9ca52327..8bc89880 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -45,7 +45,7 @@ -spec(dir/0 :: () -> file:filename()). -spec(ensure_mnesia_dir/0 :: () -> 'ok'). -spec(init/0 :: () -> 'ok'). --spec(init_db/3 :: ([node()], boolean(), boolean()) -> 'ok'). +-spec(init_db/3 :: ([node()], boolean(), rabbit_misc:thunk('ok')) -> 'ok'). 
-spec(is_db_empty/0 :: () -> boolean()). -spec(cluster/1 :: ([node()]) -> 'ok'). -spec(force_cluster/1 :: ([node()]) -> 'ok'). @@ -90,7 +90,8 @@ status() -> init() -> ensure_mnesia_running(), ensure_mnesia_dir(), - ok = init_db(read_cluster_nodes_config(), true, true), + ok = init_db(read_cluster_nodes_config(), true, + fun maybe_upgrade_local_or_record_desired/0), ok. is_db_empty() -> @@ -112,7 +113,7 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force, true), + ok = init_db(ClusterNodes, Force, fun () -> ok end), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() @@ -410,7 +411,7 @@ delete_previously_running_nodes() -> %% standalone disk node, or disk or ram node connected to the %% specified cluster nodes. If Force is false, don't allow %% connections to offline nodes. -init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> +init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of @@ -449,17 +450,7 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> true -> disc; false -> ram end), - case DoSecondaryLocalUpgrades of - true -> case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ok; - %% If we're just starting up a new - %% node we won't have a version - version_not_available -> - ok = rabbit_version:record_desired() - end; - false -> ok - end, + ok = SecondaryPostMnesiaFun(), ensure_schema_integrity(), ok end; @@ -470,6 +461,14 @@ init_db(ClusterNodes, Force, DoSecondaryLocalUpgrades) -> throw({error, {unable_to_join_cluster, ClusterNodes, Reason}}) end. +maybe_upgrade_local_or_record_desired() -> + case rabbit_upgrade:maybe_upgrade_local() of + ok -> ok; + %% If we're just starting up a new node we won't have a + %% version + version_not_available -> ok = rabbit_version:record_desired() + end. + schema_ok_or_move() -> case check_schema_integrity() of ok -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 3981b173..5ec08330 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -229,7 +229,7 @@ secondary_upgrade(AllNodes) -> false -> AllNodes -- [node()] end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = rabbit_mnesia:init_db(ClusterNodes, true, false), + ok = rabbit_mnesia:init_db(ClusterNodes, true, fun () -> ok end), ok = rabbit_version:record_desired_for_scope(mnesia), ok. 
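
The init_db/3 change above swaps the old DoSecondaryLocalUpgrades boolean for a rabbit_misc:thunk('ok') (presumably a fun(() -> 'ok') type alias), so each caller, init/0, cluster/2 and secondary_upgrade/1, decides for itself what should run once Mnesia is up. A compilable sketch of the shape of this pattern, with invented helper names standing in for the real setup steps:

    %% Illustrative only: the helpers are trivial stand-ins for the work
    %% init_db/3 really does. The point is that the post-Mnesia step arrives
    %% as a zero-arity fun ("thunk") rather than a flag to be interpreted.
    setup_db(PostMnesiaFun) ->
        ok = wait_for_mnesia(),   %% stands in for joining/creating the schema
        ok = PostMnesiaFun(),     %% e.g. fun maybe_upgrade_local_or_record_desired/0
        ok = check_integrity().   %% stands in for ensure_schema_integrity()

    wait_for_mnesia() -> ok.
    check_integrity() -> ok.

Callers that need nothing extra simply pass fun () -> ok end, as secondary_upgrade/1 does above.
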
-- cgit v1.2.1 From 330eb98c7bc0e3df4149807dba765263a06c2d3d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 17:23:12 +0000 Subject: Turns out it's very important that we do write the schema_version when call mnesia:cluster --- src/rabbit_mnesia.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8bc89880..fbcf07ae 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -113,7 +113,8 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_dir(), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try - ok = init_db(ClusterNodes, Force, fun () -> ok end), + ok = init_db(ClusterNodes, Force, + fun maybe_upgrade_local_or_record_desired/0), ok = create_cluster_nodes_config(ClusterNodes) after mnesia:stop() -- cgit v1.2.1 From 5d51177b297d5425741b808fb6f78a2712a0376e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 23 Mar 2011 18:06:16 +0000 Subject: cough --- src/rabbit_upgrade.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index 5ec08330..a2abb1e5 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -64,11 +64,11 @@ %% into the boot process by prelaunch before the mnesia application is %% started. By the time Mnesia is started the upgrades have happened %% (on the primary), or Mnesia has been reset (on the secondary) and -%% rabbit_mnesia:init_db/2 can then make the node rejoin the cluster +%% rabbit_mnesia:init_db/3 can then make the node rejoin the cluster %% in the normal way. %% %% The non-mnesia upgrades are then triggered by -%% rabbit_mnesia:init_db/2. Of course, it's possible for a given +%% rabbit_mnesia:init_db/3. Of course, it's possible for a given %% upgrade process to only require Mnesia upgrades, or only require %% non-Mnesia upgrades. In the latter case no Mnesia resets and %% reclusterings occur. -- cgit v1.2.1 From 129628e9f1c9a9d8dc0662de2cc7c50459d622d3 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 23 Mar 2011 20:12:03 +0000 Subject: removing trap_exit flag in rabbit_channel --- src/rabbit_channel.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c12614c..5099bf3f 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -156,7 +156,6 @@ ready_for_close(Pid) -> init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> - process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), State = #ch{state = starting, -- cgit v1.2.1 From c3519792e0c7358da2c108b43e2214f2a9f08875 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 24 Mar 2011 10:45:04 +0000 Subject: Fix for API change --- src/rabbit_tests.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b8c3f4a9..fc5c398e 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -694,8 +694,8 @@ test_topic_matching() -> exchange_op_callback(X, Fun, ExtraArgs) -> rabbit_misc:execute_mnesia_transaction( - fun () -> rabbit_exchange:callback(X, Fun, [true, X] ++ ExtraArgs) end), - rabbit_exchange:callback(X, Fun, [false, X] ++ ExtraArgs). + fun () -> rabbit_exchange:callback(X, Fun, [transaction, X] ++ ExtraArgs) end), + rabbit_exchange:callback(X, Fun, [none, X] ++ ExtraArgs). 
test_topic_expect_match(X, List) -> lists:foreach( -- cgit v1.2.1 From a4cf7108a08c7913e32be2ccba2a41351d578b08 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 25 Mar 2011 10:27:22 +0000 Subject: Shorten name for alignment reasons. --- src/rabbit_upgrade_functions.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 28aee9c9..7c53e996 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -26,7 +26,7 @@ -rabbit_upgrade({internal_exchanges, mnesia, []}). -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). -rabbit_upgrade({topic_trie, mnesia, []}). --rabbit_upgrade({exchange_event_serialisation, mnesia, []}). +-rabbit_upgrade({exchange_event_serial, mnesia, []}). %% ------------------------------------------------------------------- @@ -38,7 +38,7 @@ -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). -spec(topic_trie/0 :: () -> 'ok'). --spec(exchange_event_serialisation/0 :: () -> 'ok'). +-spec(exchange_event_serial/0 :: () -> 'ok'). -endif. @@ -103,7 +103,7 @@ topic_trie() -> {attributes, [trie_binding, value]}, {type, ordered_set}]). -exchange_event_serialisation() -> +exchange_event_serial() -> create(rabbit_exchange_serial, [{record_name, exchange_serial}, {attributes, [name, serial]}]). -- cgit v1.2.1 From 5de9f9dc0af669df764db3a3915fd810918f232c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 25 Mar 2011 17:33:07 +0000 Subject: Correct test for existance of config file --- packaging/common/rabbitmq-server.ocf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf index 94999d0e..d58c48ed 100755 --- a/packaging/common/rabbitmq-server.ocf +++ b/packaging/common/rabbitmq-server.ocf @@ -103,9 +103,9 @@ The IP Port for rabbitmq-server to listen on -Location of the config file +Location of the config file (without the .config suffix) -Config file path +Config file path (without the .config suffix) @@ -189,8 +189,8 @@ rabbit_validate_partial() { } rabbit_validate_full() { - if [ ! -z $RABBITMQ_CONFIG_FILE ] && [ ! -e $RABBITMQ_CONFIG_FILE ]; then - ocf_log err "rabbitmq-server config_file $RABBITMQ_CONFIG_FILE does not exist or is not a file"; + if [ ! -z $RABBITMQ_CONFIG_FILE ] && [ ! -e "${RABBITMQ_CONFIG_FILE}.config" ]; then + ocf_log err "rabbitmq-server config_file ${RABBITMQ_CONFIG_FILE}.config does not exist or is not a file"; exit $OCF_ERR_INSTALLED; fi -- cgit v1.2.1 From 927c6811e3567c9711b55034feb01002e4aaf516 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 13:35:24 +0100 Subject: Make serialise_events into /0, don't call it via callback/3. --- include/rabbit_exchange_type_spec.hrl | 2 +- src/rabbit_binding.erl | 2 +- src/rabbit_exchange.erl | 13 +++++++------ src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_direct.erl | 4 ++-- src/rabbit_exchange_type_fanout.erl | 4 ++-- src/rabbit_exchange_type_headers.erl | 4 ++-- src/rabbit_exchange_type_topic.erl | 4 ++-- 8 files changed, 18 insertions(+), 17 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index ae33e07a..9458d2fe 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -19,7 +19,7 @@ -type(serial() :: pos_integer() | 'transaction' | 'none'). -spec(description/0 :: () -> [{atom(), any()}]). 
--spec(serialise_events/1 :: (rabbit_types:exchange()) -> boolean()). +-spec(serialise_events/0 :: () -> boolean()). -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index cc7aea33..1c043370 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -453,7 +453,7 @@ pd_callback(Arg, CB, X, Bindings) -> ok = rabbit_exchange:callback(X, CB, [Arg, X, Bindings]). serial(X) -> - case rabbit_exchange:callback(X, serialise_events, [X]) of + case rabbit_exchange:serialise_events([X]) of true -> next_serial(X); false -> none end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 504cf935..5694336a 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -20,7 +20,7 @@ -export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). --export([callback/3]). +-export([callback/3, serialise_events/1]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). -export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). @@ -72,9 +72,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> - boolean() | 'ok'). - +-spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). +-spec(serialise_events/1:: (rabbit_types:exchange()) -> boolean()). -endif. %%---------------------------------------------------------------------------- @@ -129,8 +128,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> fun ({new, Exchange}, Tx) -> S = case Tx of true -> transaction; - false -> case callback(Exchange, serialise_events, - [Exchange]) of + false -> case serialise_events(Exchange) of true -> 0; false -> none end @@ -307,6 +305,9 @@ maybe_auto_delete(#exchange{auto_delete = true} = X) -> callback(#exchange{type = XType}, Fun, Args) -> apply(type_to_module(XType), Fun, Args). +serialise_events(#exchange{type = XType}) -> + apply(type_to_module(XType), serialise_events, []). + conditional_delete(X = #exchange{name = XName}) -> case rabbit_binding:has_for_source(XName) of false -> unconditional_delete(X); diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 670551de..468ee0b4 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -25,7 +25,7 @@ behaviour_info(callbacks) -> %% Should Rabbit ensure that all events delivered to an individual exchange %% this can be serialised? (they might still be delivered out %% of order, but there'll be a serial number). - {serialise_events, 1}, + {serialise_events, 0}, {route, 2}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index bc7a76e3..b99ee27d 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/1]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -39,7 +39,7 @@ route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> rabbit_router:match_routing_key(Name, Routes). 
-serialise_events(_X) -> false. +serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 2e70fb24..e12c9964 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/1]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -38,7 +38,7 @@ description() -> route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). -serialise_events(_X) -> false. +serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 1e8b0687..f571978f 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/1]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -112,7 +112,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], end, headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). -serialise_events(_X) -> false. +serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index e3fd9283..1bd91dcb 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/1]). +-export([description/0, route/2, serialise_events/0]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -46,7 +46,7 @@ route(#exchange{name = X}, mnesia:async_dirty(fun trie_match/2, [X, Words]) end || RKey <- Routes]). -serialise_events(_X) -> false. +serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. -- cgit v1.2.1 From d98643f89d77264c98b5b895dc1c3b3fffb25d5c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 14:16:39 +0100 Subject: Oops. --- src/rabbit_binding.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1c043370..987287c9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -453,7 +453,7 @@ pd_callback(Arg, CB, X, Bindings) -> ok = rabbit_exchange:callback(X, CB, [Arg, X, Bindings]). serial(X) -> - case rabbit_exchange:serialise_events([X]) of + case rabbit_exchange:serialise_events(X) of true -> next_serial(X); false -> none end. -- cgit v1.2.1 From 7e8b0ebc3713d2ec8cff74d0de3d780ef937ef71 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 14:16:55 +0100 Subject: Unify funs, cosmetic. 
--- src/rabbit_binding.erl | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 987287c9..1564573e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -412,40 +412,34 @@ process_addition(_Src, B, _Serial) -> process_deletions(Deletions, transaction) -> process_deletions( - fun (X, Bindings, Acc) -> - pd_callback(transaction, remove_bindings, X, Bindings), - dict:store(X, serial(X), Acc) - end, - fun (X, Bindings, Acc) -> - pd_callback(transaction, delete, X, Bindings), + fun (Mode, X, Bindings, Acc) -> + pd_callback(transaction, Mode, X, Bindings), dict:store(X, serial(X), Acc) end, Deletions, dict:new(), true); process_deletions(Deletions, Serials) -> process_deletions( - fun (X, Bindings, Acc) -> - pd_callback(dict:fetch(X, Serials), remove_bindings, X, Bindings), - Acc - end, - fun (X, Bindings, Acc) -> - pd_callback(dict:fetch(X, Serials), delete, X, Bindings), - rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]), + fun (Mode, X, Bindings, Acc) -> + pd_callback(dict:fetch(X, Serials), Mode, X, Bindings), + case Mode of + delete -> rabbit_event:notify(exchange_deleted, + [{name, X#exchange.name}]); + _ -> ok + end, Acc end, Deletions, ok, false). -process_deletions(NotDeletedFun, DeletedFun, Deletions, Acc0, Tx) -> +process_deletions(Fun, Deletions, Acc0, Tx) -> dict:fold( fun (_XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), [rabbit_event:notify_if(not Tx, binding_deleted, info(B)) || B <- FlatBindings], case Deleted of - not_deleted -> - NotDeletedFun(X, FlatBindings, Acc); - deleted -> - DeletedFun(X, FlatBindings, Acc) + not_deleted -> Fun(remove_bindings, X, FlatBindings, Acc); + deleted -> Fun(delete, X, FlatBindings, Acc) end end, Acc0, Deletions). -- cgit v1.2.1 From 17218a5174d1cd5fe2dfa340d90ccd781b50a224 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 15:41:19 +0100 Subject: Make the tail fun in execute_mnesia_tx_with_tail *only* get executed after the tx, and roll other uses into the tx fun. This is rather simpler hopefully. --- src/rabbit_amqqueue.erl | 16 ++++++---------- src/rabbit_binding.erl | 28 +++++++++++++--------------- src/rabbit_exchange.erl | 40 ++++++++++++++++++++++++---------------- src/rabbit_misc.erl | 17 ++++++----------- 4 files changed, 49 insertions(+), 52 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 80dcb79a..60b7b384 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -214,13 +214,7 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> case mnesia:read({rabbit_durable_queue, QueueName}) of [] -> ok = store_queue(Q), B = add_default_binding(Q), - fun (Tx) -> - R = B(Tx), - case Tx of - transaction -> R; - _ -> Q - end - end; + fun () -> B(), Q end; %% Q exists on stopped node [_] -> rabbit_misc:const(not_found) end; @@ -228,7 +222,7 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> case rabbit_misc:is_process_alive(QPid) of true -> rabbit_misc:const(ExistingQ); false -> TailFun = internal_delete(QueueName), - fun (Tx) -> TailFun(Tx), ExistingQ end + fun () -> TailFun(), ExistingQ end end end end). 
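
With this commit the fun handed to rabbit_misc:execute_mnesia_tx_with_tail/1 runs inside the Mnesia transaction and must return a zero-arity continuation; that continuation is invoked exactly once after the transaction (when the helper is called from outside any existing transaction), which is why internal_declare/2 above can simply return fun () -> B(), Q end. A sketch of what a caller looks like under this scheme; remove_thing/1, my_table and notify_removed/1 are invented names, not part of the patch:

    %% Runs the lookup and delete inside a transaction, defers the
    %% notification until after commit, and still returns a plain value
    %% to its own caller.
    remove_thing(Name) ->
        rabbit_misc:execute_mnesia_tx_with_tail(
          fun () ->
                  case mnesia:wread({my_table, Name}) of
                      []  -> rabbit_misc:const({error, not_found});
                      [_] -> ok = mnesia:delete({my_table, Name}),
                             fun () -> notify_removed(Name) end
                  end
          end).

    notify_removed(_Name) -> ok.   %% stand-in for a post-commit side effect

Note that rabbit_misc:const/1 is adjusted in the same commit to build exactly such a zero-arity thunk, so the not-found branch needs no special casing.
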
@@ -439,8 +433,10 @@ internal_delete(QueueName) -> case mnesia:wread({rabbit_queue, QueueName}) of [] -> rabbit_misc:const({error, not_found}); [_] -> Deletions = internal_delete1(QueueName), - fun (Tx) -> rabbit_binding:process_deletions( - Deletions, Tx) + Serials = rabbit_binding:process_deletions( + Deletions, transaction), + fun () -> rabbit_binding:process_deletions( + Deletions, Serials) end end end). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1564573e..b765d5e1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -122,20 +122,23 @@ add(Binding, InnerFun) -> case InnerFun(Src, Dst) of ok -> case mnesia:read({rabbit_route, B}) of - [] -> ok = sync_binding(B, all_durable([Src, Dst]), - fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), - process_addition(Src, B, Tx) - end; - [_] -> fun rabbit_misc:const_ok/1 + [] -> add_notify(Src, Dst, B); + [_] -> fun rabbit_misc:const_ok/0 end; {error, _} = Err -> rabbit_misc:const(Err) end end). +add_notify(Src, Dst, B) -> + ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), + ok = rabbit_exchange:callback(Src, add_binding, [transaction, Src, B]), + Serial = serial(Src), + fun () -> + ok = rabbit_exchange:callback(Src, add_binding, [Serial, Src, B]), + ok = rabbit_event:notify(binding_created, info(B)) + end. + remove(Binding, InnerFun) -> binding_action( Binding, @@ -160,7 +163,8 @@ remove(Binding, InnerFun) -> {error, _} = Err -> rabbit_misc:const(Err); {ok, Deletions} -> - fun (Tx) -> process_deletions(Deletions, Tx) end + Serials = process_deletions(Deletions, transaction), + fun () -> process_deletions(Deletions, Serials) end end end). @@ -404,12 +408,6 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> anything_but(not_deleted, Deleted1, Deleted2), [Bindings1 | Bindings2]}. -process_addition(Src, _B, transaction) -> - serial(Src); - -process_addition(_Src, B, _Serial) -> - ok = rabbit_event:notify(binding_created, info(B)). - process_deletions(Deletions, transaction) -> process_deletions( fun (Mode, X, Bindings, Acc) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 5694336a..e704a44c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -270,28 +270,36 @@ process_route(#resource{kind = queue} = QName, {WorkList, SeenXs, QNames}) -> {WorkList, SeenXs, [QName | QNames]}. -call_with_exchange(XName, Fun, PrePostCommitFun) -> +call_with_exchange(XName, Fun) -> rabbit_misc:execute_mnesia_tx_with_tail( - fun () -> Result = case mnesia:read({rabbit_exchange, XName}) of - [] -> {error, not_found}; - [X] -> Fun(X) - end, - fun(Tx) -> PrePostCommitFun(Result, Tx) end + fun () -> case mnesia:read({rabbit_exchange, XName}) of + [] -> rabbit_misc:const({error, not_found}); + [X] -> Fun(X) + end end). delete(XName, IfUnused) -> + delete0(XName, case IfUnused of + true -> fun conditional_delete/1; + false -> fun unconditional_delete/1 + end). 
+ +delete0(XName, Fun) -> call_with_exchange( XName, - case IfUnused of - true -> fun conditional_delete/1; - false -> fun unconditional_delete/1 - end, - fun ({deleted, X, Bs, Deletions}, Tx) -> - rabbit_binding:process_deletions( - rabbit_binding:add_deletion( - XName, {X, deleted, Bs}, Deletions), Tx); - (Error = {error, _InUseOrNotFound}, _Tx) -> - Error + fun (X) -> + case Fun(X) of + {deleted, X, Bs, Deletions} -> + Dels1 = rabbit_binding:add_deletion( + XName, {X, deleted, Bs}, Deletions), + Serials = rabbit_binding:process_deletions( + Dels1, transaction), + fun () -> + rabbit_binding:process_deletions(Dels1, Serials) + end; + {error, _InUseOrNotFound} = E -> + rabbit_misc:const(E) + end end). maybe_auto_delete(#exchange{auto_delete = false}) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 3f0bc9bb..45f59999 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -54,7 +54,7 @@ -export([all_module_attributes/1, build_acyclic_graph/3]). -export([now_ms/0]). -export([lock_file/1]). --export([const_ok/1, const/1]). +-export([const_ok/0, const/1]). -export([ntoa/1, ntoab/1]). -export([is_process_alive/1]). @@ -191,7 +191,7 @@ digraph:vertex(), digraph:vertex()})). -spec(now_ms/0 :: () -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). --spec(const_ok/1 :: (any()) -> 'ok'). +-spec(const_ok/0 :: () -> 'ok'). -spec(const/1 :: (A) -> const(A)). -spec(ntoa/1 :: (inet:ip_address()) -> string()). -spec(ntoab/1 :: (inet:ip_address()) -> string()). @@ -409,13 +409,8 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> execute_mnesia_tx_with_tail(TxFun) -> case mnesia:is_transaction() of true -> execute_mnesia_transaction(TxFun); - false -> {TailFun, TailRes} = execute_mnesia_transaction( - fun () -> - TailFun1 = TxFun(), - Res1 = TailFun1(transaction), - {TailFun1, Res1} - end), - TailFun(TailRes) + false -> TailFun = execute_mnesia_transaction(TxFun), + TailFun() end. ensure_ok(ok, _) -> ok; @@ -847,8 +842,8 @@ lock_file(Path) -> ok = file:close(Lock) end. -const_ok(_) -> ok. -const(X) -> fun (_) -> X end. +const_ok() -> ok. +const(X) -> fun () -> X end. %% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see %% when IPv6 is enabled but not used (i.e. 99% of the time). -- cgit v1.2.1 From 52e4fe98abbaddad26e0547ce0cf6bf51dbaed9f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 15:46:52 +0100 Subject: Simplify a bit more. 
--- src/rabbit_binding.erl | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b765d5e1..b04bc886 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -410,35 +410,33 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> process_deletions(Deletions, transaction) -> process_deletions( - fun (Mode, X, Bindings, Acc) -> - pd_callback(transaction, Mode, X, Bindings), + fun (Event, X, Bindings, Acc) -> + pd_callback(transaction, Event, X, Bindings), dict:store(X, serial(X), Acc) end, - Deletions, dict:new(), true); + Deletions, dict:new()); process_deletions(Deletions, Serials) -> process_deletions( - fun (Mode, X, Bindings, Acc) -> - pd_callback(dict:fetch(X, Serials), Mode, X, Bindings), - case Mode of + fun (Event, X, Bindings, Acc) -> + [rabbit_event:notify(binding_deleted, info(B)) || B <- Bindings], + pd_callback(dict:fetch(X, Serials), Event, X, Bindings), + case Event of delete -> rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]); _ -> ok end, Acc end, - Deletions, ok, false). + Deletions, ok). -process_deletions(Fun, Deletions, Acc0, Tx) -> +process_deletions(Fun, Deletions, Acc0) -> dict:fold( fun (_XName, {X, Deleted, Bindings}, Acc) -> - FlatBindings = lists:flatten(Bindings), - [rabbit_event:notify_if(not Tx, binding_deleted, info(B)) || - B <- FlatBindings], - case Deleted of - not_deleted -> Fun(remove_bindings, X, FlatBindings, Acc); - deleted -> Fun(delete, X, FlatBindings, Acc) - end + Fun(case Deleted of + not_deleted -> remove_bindings; + deleted -> delete + end, X, lists:flatten(Bindings), Acc) end, Acc0, Deletions). pd_callback(Arg, CB, X, Bindings) -> -- cgit v1.2.1 From 0bc5d1ec85c0bce3ac1d89180248fac64e78e0a0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 16:41:43 +0100 Subject: Fix on_node_down. --- src/rabbit_amqqueue.erl | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 60b7b384..e300fa32 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -466,18 +466,17 @@ drop_expired(QPid) -> gen_server2:cast(QPid, drop_expired). on_node_down(Node) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> qlc:e(qlc:q([delete_queue(QueueName) || - #amqqueue{name = QueueName, pid = Pid} - <- mnesia:table(rabbit_queue), - node(Pid) == Node])) - end, - fun (Deletions, Tx) -> - rabbit_binding:process_deletions( - lists:foldl(fun rabbit_binding:combine_deletions/2, - rabbit_binding:new_deletions(), - Deletions), - Tx) + rabbit_misc:execute_mnesia_tx_with_tail( + fun () -> Dels = qlc:e(qlc:q([delete_queue(QueueName) || + #amqqueue{name = QueueName, pid = Pid} + <- mnesia:table(rabbit_queue), + node(Pid) == Node])), + Dels1 = lists:foldl(fun rabbit_binding:combine_deletions/2, + rabbit_binding:new_deletions(), Dels), + Serials = rabbit_binding:process_deletions(Dels1, transaction), + fun () -> + rabbit_binding:process_deletions(Dels1, Serials) + end end). delete_queue(QueueName) -> -- cgit v1.2.1 From 4c14d30f7f25451e677c6e2f11ef2f86a64da7c0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 17:54:01 +0100 Subject: Cosmetic. 
--- src/rabbit_binding.erl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b04bc886..13e829e2 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -410,21 +410,21 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> process_deletions(Deletions, transaction) -> process_deletions( - fun (Event, X, Bindings, Acc) -> - pd_callback(transaction, Event, X, Bindings), + fun (Deleted, X, Bindings, Acc) -> + pd_callback(transaction, Deleted, X, Bindings), dict:store(X, serial(X), Acc) end, Deletions, dict:new()); process_deletions(Deletions, Serials) -> process_deletions( - fun (Event, X, Bindings, Acc) -> + fun (Deleted, X, Bindings, Acc) -> [rabbit_event:notify(binding_deleted, info(B)) || B <- Bindings], - pd_callback(dict:fetch(X, Serials), Event, X, Bindings), - case Event of - delete -> rabbit_event:notify(exchange_deleted, + pd_callback(dict:fetch(X, Serials), Deleted, X, Bindings), + case Deleted of + deleted -> rabbit_event:notify(exchange_deleted, [{name, X#exchange.name}]); - _ -> ok + _ -> ok end, Acc end, @@ -433,14 +433,14 @@ process_deletions(Deletions, Serials) -> process_deletions(Fun, Deletions, Acc0) -> dict:fold( fun (_XName, {X, Deleted, Bindings}, Acc) -> - Fun(case Deleted of - not_deleted -> remove_bindings; - deleted -> delete - end, X, lists:flatten(Bindings), Acc) + Fun(Deleted, X, lists:flatten(Bindings), Acc) end, Acc0, Deletions). pd_callback(Arg, CB, X, Bindings) -> - ok = rabbit_exchange:callback(X, CB, [Arg, X, Bindings]). + ok = rabbit_exchange:callback(X, case CB of + not_deleted -> remove_bindings; + deleted -> delete + end, [Arg, X, Bindings]). serial(X) -> case rabbit_exchange:serialise_events(X) of -- cgit v1.2.1 From a517487a1e91efb27b2f3654b153ff0d6cbb5fbe Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 18:02:53 +0100 Subject: Better abstraction. --- src/rabbit_amqqueue.erl | 15 ++++--------- src/rabbit_binding.erl | 57 ++++++++++++++++++++++++------------------------- src/rabbit_exchange.erl | 10 +++------ 3 files changed, 35 insertions(+), 47 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e300fa32..167b1a55 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -433,11 +433,7 @@ internal_delete(QueueName) -> case mnesia:wread({rabbit_queue, QueueName}) of [] -> rabbit_misc:const({error, not_found}); [_] -> Deletions = internal_delete1(QueueName), - Serials = rabbit_binding:process_deletions( - Deletions, transaction), - fun () -> rabbit_binding:process_deletions( - Deletions, Serials) - end + rabbit_binding:process_deletions(Deletions) end end). @@ -471,12 +467,9 @@ on_node_down(Node) -> #amqqueue{name = QueueName, pid = Pid} <- mnesia:table(rabbit_queue), node(Pid) == Node])), - Dels1 = lists:foldl(fun rabbit_binding:combine_deletions/2, - rabbit_binding:new_deletions(), Dels), - Serials = rabbit_binding:process_deletions(Dels1, transaction), - fun () -> - rabbit_binding:process_deletions(Dels1, Serials) - end + rabbit_binding:process_deletions( + lists:foldl(fun rabbit_binding:combine_deletions/2, + rabbit_binding:new_deletions(), Dels)) end). delete_queue(QueueName) -> diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 13e829e2..31605844 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -21,7 +21,7 @@ -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). 
-export([new_deletions/0, combine_deletions/2, add_deletion/3, - process_deletions/2]). + process_deletions/1]). -export([info_keys/0, info/1, info/2, info_all/1, info_all/2]). %% these must all be run inside a mnesia tx -export([has_for_source/1, remove_for_source/1, @@ -77,7 +77,7 @@ (rabbit_types:binding_destination()) -> deletions()). -spec(remove_transient_for_destination/1 :: (rabbit_types:binding_destination()) -> deletions()). --spec(process_deletions/2 :: (deletions(), boolean()) -> 'ok'). +-spec(process_deletions/1 :: (deletions()) -> 'ok'). -spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). -spec(add_deletion/3 :: (rabbit_exchange:name(), {'undefined' | rabbit_types:exchange(), @@ -160,11 +160,8 @@ remove(Binding, InnerFun) -> end end, case Result of - {error, _} = Err -> - rabbit_misc:const(Err); - {ok, Deletions} -> - Serials = process_deletions(Deletions, transaction), - fun () -> process_deletions(Deletions, Serials) end + {error, _} = Err -> rabbit_misc:const(Err); + {ok, Deletions} -> process_deletions(Deletions) end end). @@ -408,27 +405,29 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> anything_but(not_deleted, Deleted1, Deleted2), [Bindings1 | Bindings2]}. -process_deletions(Deletions, transaction) -> - process_deletions( - fun (Deleted, X, Bindings, Acc) -> - pd_callback(transaction, Deleted, X, Bindings), - dict:store(X, serial(X), Acc) - end, - Deletions, dict:new()); - -process_deletions(Deletions, Serials) -> - process_deletions( - fun (Deleted, X, Bindings, Acc) -> - [rabbit_event:notify(binding_deleted, info(B)) || B <- Bindings], - pd_callback(dict:fetch(X, Serials), Deleted, X, Bindings), - case Deleted of - deleted -> rabbit_event:notify(exchange_deleted, - [{name, X#exchange.name}]); - _ -> ok +process_deletions(Deletions) -> + Serials = process_deletions( + fun (Deleted, X, Bindings, Acc) -> + pd_callback(transaction, Deleted, X, Bindings), + dict:store(X, serial(X), Acc) + end, + Deletions, dict:new()), + fun() -> + process_deletions( + fun (Deleted, X, Bindings, Acc) -> + [rabbit_event:notify(binding_deleted, info(B)) || + B <- Bindings], + pd_callback(dict:fetch(X, Serials), Deleted, X, Bindings), + case Deleted of + deleted -> rabbit_event:notify( + exchange_deleted, + [{name, X#exchange.name}]); + _ -> ok + end, + Acc end, - Acc - end, - Deletions, ok). + Deletions, ok) + end. process_deletions(Fun, Deletions, Acc0) -> dict:fold( @@ -436,8 +435,8 @@ process_deletions(Fun, Deletions, Acc0) -> Fun(Deleted, X, lists:flatten(Bindings), Acc) end, Acc0, Deletions). -pd_callback(Arg, CB, X, Bindings) -> - ok = rabbit_exchange:callback(X, case CB of +pd_callback(Arg, Deleted, X, Bindings) -> + ok = rabbit_exchange:callback(X, case Deleted of not_deleted -> remove_bindings; deleted -> delete end, [Arg, X, Bindings]). 
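
add_notify/2 and process_deletions/1 above invoke each exchange-type callback twice: once with 'transaction' from inside the Mnesia transaction, and once afterwards with either a per-exchange serial (when the type's serialise_events/0 returns true) or 'none'. Seen from an exchange-type module that opts in, that looks roughly like the sketch below; it is not code from these patches, and handle_serially/3 is an invented helper that would re-sequence events by serial before applying them:

    serialise_events() -> true.

    add_binding(transaction, _X, _B) ->
        ok;                                  %% inside the Mnesia transaction
    add_binding(Serial, X, B) ->
        handle_serially(Serial, X, {add, B}).         %% after commit, numbered

    remove_bindings(transaction, _X, _Bs) ->
        ok;
    remove_bindings(Serial, X, Bs) ->
        handle_serially(Serial, X, {remove, Bs}).

    %% e.g. forward to a process that applies events in serial order; with
    %% serialise_events() -> true, Serial is a positive integer here.
    handle_serially(_Serial, _X, _Event) -> ok.
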
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index e704a44c..c1c1d3c8 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -290,13 +290,9 @@ delete0(XName, Fun) -> fun (X) -> case Fun(X) of {deleted, X, Bs, Deletions} -> - Dels1 = rabbit_binding:add_deletion( - XName, {X, deleted, Bs}, Deletions), - Serials = rabbit_binding:process_deletions( - Dels1, transaction), - fun () -> - rabbit_binding:process_deletions(Dels1, Serials) - end; + rabbit_binding:process_deletions( + rabbit_binding:add_deletion( + XName, {X, deleted, Bs}, Deletions)); {error, _InUseOrNotFound} = E -> rabbit_misc:const(E) end -- cgit v1.2.1 From 084945d6936584cc868965c2987abc166b13120a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 18:04:38 +0100 Subject: Cosmetic. --- src/rabbit_binding.erl | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 31605844..4e85d08e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -410,8 +410,7 @@ process_deletions(Deletions) -> fun (Deleted, X, Bindings, Acc) -> pd_callback(transaction, Deleted, X, Bindings), dict:store(X, serial(X), Acc) - end, - Deletions, dict:new()), + end, Deletions, dict:new()), fun() -> process_deletions( fun (Deleted, X, Bindings, Acc) -> @@ -425,15 +424,13 @@ process_deletions(Deletions) -> _ -> ok end, Acc - end, - Deletions, ok) + end, Deletions, ok) end. process_deletions(Fun, Deletions, Acc0) -> - dict:fold( - fun (_XName, {X, Deleted, Bindings}, Acc) -> - Fun(Deleted, X, lists:flatten(Bindings), Acc) - end, Acc0, Deletions). + dict:fold(fun (_XName, {X, Deleted, Bindings}, Acc) -> + Fun(Deleted, X, lists:flatten(Bindings), Acc) + end, Acc0, Deletions). pd_callback(Arg, Deleted, X, Bindings) -> ok = rabbit_exchange:callback(X, case Deleted of -- cgit v1.2.1 From 1257dd95a1c6815a4b9e59099a6fb4ffb2f9fd31 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 18:05:40 +0100 Subject: Cosmetic. I'm sure at some point Prev was used for something. --- src/rabbit_binding.erl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 4e85d08e..645b42fa 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -445,11 +445,10 @@ serial(X) -> end. next_serial(#exchange{name = Name}) -> - Prev = case mnesia:read(rabbit_exchange_serial, Name, write) of - [] -> 0; - [#exchange_serial{serial = S}] -> S - end, - Serial = Prev + 1, + Serial = case mnesia:read(rabbit_exchange_serial, Name, write) of + [] -> 1; + [#exchange_serial{serial = S}] -> S + 1 + end, mnesia:write(rabbit_exchange_serial, #exchange_serial{name = Name, serial = Serial}, write), Serial. -- cgit v1.2.1 From cbceafa29b1e8dbad97b3cf1eab548d4cd44001b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 18:06:44 +0100 Subject: Comment. --- src/rabbit_exchange_type.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 468ee0b4..d1563a62 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -22,9 +22,10 @@ behaviour_info(callbacks) -> [ {description, 0}, - %% Should Rabbit ensure that all events delivered to an individual exchange - %% this can be serialised? (they might still be delivered out - %% of order, but there'll be a serial number). 
+ %% Should Rabbit ensure that all binding events that are + %% delivered to an individual exchange can be serialised? (they + %% might still be delivered out of order, but there'll be a + %% serial number). {serialise_events, 0}, {route, 2}, -- cgit v1.2.1 From efd64c5389b101b36a3b95408b2f7d03f4f01b92 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 28 Mar 2011 18:08:41 +0100 Subject: Cosmetic: move serialise_events. --- src/rabbit_exchange_type_direct.erl | 5 +++-- src/rabbit_exchange_type_fanout.erl | 5 +++-- src/rabbit_exchange_type_headers.erl | 5 +++-- src/rabbit_exchange_type_topic.erl | 5 +++-- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index b99ee27d..687567a8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, serialise_events/0, route/2]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,11 +35,12 @@ description() -> [{name, <<"direct">>}, {description, <<"AMQP direct exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, #delivery{message = #basic_message{routing_keys = Routes}}) -> rabbit_router:match_routing_key(Name, Routes). -serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index e12c9964..cbde0dd2 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -19,7 +19,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, serialise_events/0, route/2]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -35,10 +35,11 @@ description() -> [{name, <<"fanout">>}, {description, <<"AMQP fanout exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). -serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index f571978f..89f8fcfb 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, serialise_events/0, route/2]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -41,6 +41,8 @@ description() -> [{name, <<"headers">>}, {description, <<"AMQP headers exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + route(#exchange{name = Name}, #delivery{message = #basic_message{content = Content}}) -> Headers = case (Content#content.properties)#'P_basic'.headers of @@ -112,7 +114,6 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], end, headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). -serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. 
recover(_X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 1bd91dcb..7f3d83e0 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). --export([description/0, route/2, serialise_events/0]). +-export([description/0, serialise_events/0, route/2]). -export([validate/1, create/2, recover/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -38,6 +38,8 @@ description() -> [{name, <<"topic">>}, {description, <<"AMQP topic exchange, as per the AMQP specification">>}]. +serialise_events() -> false. + %% NB: This may return duplicate results in some situations (that's ok) route(#exchange{name = X}, #delivery{message = #basic_message{routing_keys = Routes}}) -> @@ -46,7 +48,6 @@ route(#exchange{name = X}, mnesia:async_dirty(fun trie_match/2, [X, Words]) end || RKey <- Routes]). -serialise_events() -> false. validate(_X) -> ok. create(_Tx, _X) -> ok. -- cgit v1.2.1 From b620a4d31d801cea9a380428f80094a5423037b6 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 20:43:32 +0100 Subject: cosmetic --- src/rabbit_binding.erl | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 645b42fa..13362232 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -406,33 +406,29 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> [Bindings1 | Bindings2]}. process_deletions(Deletions) -> - Serials = process_deletions( - fun (Deleted, X, Bindings, Acc) -> - pd_callback(transaction, Deleted, X, Bindings), + Serials = dict:fold( + fun (_XName, {X, Deleted, Bindings}, Acc) -> + FlatBindings = lists:flatten(Bindings), + pd_callback(transaction, X, Deleted, FlatBindings), dict:store(X, serial(X), Acc) end, Deletions, dict:new()), fun() -> - process_deletions( - fun (Deleted, X, Bindings, Acc) -> + dict:fold( + fun (XName, {X, Deleted, Bindings}, ok) -> + FlatBindings = lists:flatten(Bindings), + Serial = dict:fetch(X, Serials), + pd_callback(Serial, X, Deleted, FlatBindings), [rabbit_event:notify(binding_deleted, info(B)) || - B <- Bindings], - pd_callback(dict:fetch(X, Serials), Deleted, X, Bindings), + B <- FlatBindings], case Deleted of - deleted -> rabbit_event:notify( - exchange_deleted, - [{name, X#exchange.name}]); + deleted -> ok = rabbit_event:notify( + exchange_deleted, [{name, XName}]); _ -> ok - end, - Acc + end end, Deletions, ok) end. -process_deletions(Fun, Deletions, Acc0) -> - dict:fold(fun (_XName, {X, Deleted, Bindings}, Acc) -> - Fun(Deleted, X, lists:flatten(Bindings), Acc) - end, Acc0, Deletions). - -pd_callback(Arg, Deleted, X, Bindings) -> +pd_callback(Arg, X, Deleted, Bindings) -> ok = rabbit_exchange:callback(X, case Deleted of not_deleted -> remove_bindings; deleted -> delete -- cgit v1.2.1 From b5676465bfed9d4dda43b50043c2d67b5b595e2e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:10:20 +0100 Subject: more sensible order of exchange exports --- src/rabbit_exchange.erl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a463e570..b5d38b75 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -18,12 +18,13 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). 
--export([recover/0, declare/6, lookup/1, lookup_or_die/1, list/1, info_keys/0, - info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). --export([callback/3]). +-export([recover/0, callback/3, declare/6, + assert_equivalence/6, assert_args_equivalence/2, check_type/1, + lookup/1, lookup_or_die/1, list/1, + info_keys/0, info/1, info/2, info_all/1, info_all/2, + publish/2, delete/2]). %% this must be run inside a mnesia tx -export([maybe_auto_delete/1]). --export([assert_equivalence/6, assert_args_equivalence/2, check_type/1]). %%---------------------------------------------------------------------------- @@ -33,8 +34,10 @@ -type(name() :: rabbit_types:r('exchange')). -type(type() :: atom()). +-type(fun_name() :: atom()). -spec(recover/0 :: () -> 'ok'). +-spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), rabbit_framing:amqp_table()) @@ -72,7 +75,6 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(callback/3:: (rabbit_types:exchange(), atom(), [any()]) -> 'ok'). -endif. @@ -101,6 +103,9 @@ recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> recover_with_bindings([], [], []) -> ok. +callback(#exchange{type = XType}, Fun, Args) -> + apply(type_to_module(XType), Fun, Args). + declare(XName, Type, Durable, AutoDelete, Internal, Args) -> X = #exchange{name = XName, type = Type, @@ -294,9 +299,6 @@ maybe_auto_delete(#exchange{auto_delete = true} = X) -> {deleted, X, [], Deletions} -> {deleted, Deletions} end. -callback(#exchange{type = XType}, Fun, Args) -> - apply(type_to_module(XType), Fun, Args). - conditional_delete(X = #exchange{name = XName}) -> case rabbit_binding:has_for_source(XName) of false -> unconditional_delete(X); -- cgit v1.2.1 From 6465f6639b0e73f4080317dec82fac1e7397e090 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:13:36 +0100 Subject: tweak: only invoke rabbit_exchange:callback when absolutely necessary --- src/rabbit_exchange.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b5d38b75..cab6510b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -131,7 +131,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - callback(Exchange, create, [Tx, Exchange]), + ok = (type_to_module(Type)):create(Tx, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> -- cgit v1.2.1 From a438017121d00695475817ce3f8fef1a525d4e26 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:25:43 +0100 Subject: cosmetic --- src/rabbit_exchange.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index cab6510b..9d9b07af 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -140,11 +140,6 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). -%% Used with atoms from records; e.g., the type is expected to exist. -type_to_module(T) -> - {ok, Module} = rabbit_registry:lookup_module(exchange, T), - Module. - %% Used with binaries sent over the wire; the type may not exist. 
check_type(TypeBin) -> case rabbit_registry:binary_to_type(TypeBin) of @@ -310,3 +305,8 @@ unconditional_delete(X = #exchange{name = XName}) -> ok = mnesia:delete({rabbit_exchange, XName}), Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. + +%% Used with atoms from records; e.g., the type is expected to exist. +type_to_module(T) -> + {ok, Module} = rabbit_registry:lookup_module(exchange, T), + Module. -- cgit v1.2.1 From c95652b13944708a6de4f818bd1c9764d1d1c77e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:46:30 +0100 Subject: only look up exchange type once ...and fix a bug in the XT:create invocation - the serial wasn't being passed through. --- src/rabbit_exchange.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 7813db2a..3bd667a5 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -113,8 +113,9 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> auto_delete = AutoDelete, internal = Internal, arguments = Args}, + XT = type_to_module(Type), %% We want to upset things if it isn't ok - ok = (type_to_module(Type)):validate(X), + ok = XT:validate(X), rabbit_misc:execute_mnesia_transaction( fun () -> case mnesia:wread({rabbit_exchange, XName}) of @@ -131,14 +132,13 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - S = case Tx of - true -> transaction; - false -> case serialise_events(Exchange) of - true -> 0; - false -> none - end - end, - ok = (type_to_module(Type)):create(Tx, Exchange), + ok = XT:create(case Tx of + true -> transaction; + false -> case XT:serialise_events() of + true -> 0; + false -> none + end + end, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> -- cgit v1.2.1 From baf3572c49471b780a88475baa92feb64749f90a Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:47:45 +0100 Subject: refactor: move all serial manipulation code into 'exchange' module --- src/rabbit_binding.erl | 19 ++----------------- src/rabbit_exchange.erl | 27 ++++++++++++++++++++------- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 13362232..1092948a 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -133,7 +133,7 @@ add(Binding, InnerFun) -> add_notify(Src, Dst, B) -> ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), ok = rabbit_exchange:callback(Src, add_binding, [transaction, Src, B]), - Serial = serial(Src), + Serial = rabbit_exchange:serial(Src), fun () -> ok = rabbit_exchange:callback(Src, add_binding, [Serial, Src, B]), ok = rabbit_event:notify(binding_created, info(B)) @@ -410,7 +410,7 @@ process_deletions(Deletions) -> fun (_XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), pd_callback(transaction, X, Deleted, FlatBindings), - dict:store(X, serial(X), Acc) + dict:store(X, rabbit_exchange:serial(X), Acc) end, Deletions, dict:new()), fun() -> dict:fold( @@ -433,18 +433,3 @@ pd_callback(Arg, X, Deleted, Bindings) -> not_deleted -> remove_bindings; deleted -> delete end, [Arg, X, Bindings]). - -serial(X) -> - case rabbit_exchange:serialise_events(X) of - true -> next_serial(X); - false -> none - end. 
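
The serial bookkeeping being moved into rabbit_exchange above is nothing more than a per-exchange counter row in Mnesia: it is read with a write lock and bumped in the same transaction as the binding change it numbers, which keeps the serials strictly increasing per exchange. The same shape in isolation (illustrative; the counter record and table name are invented, the real code uses #exchange_serial{} in rabbit_exchange_serial):

    %% Must run inside a Mnesia transaction; the write lock on the row
    %% serialises concurrent increments with the changes they number.
    -record(counter, {name, value}).

    next_value(Name) ->
        V = case mnesia:read(counter, Name, write) of
                []                     -> 1;
                [#counter{value = V0}] -> V0 + 1
            end,
        ok = mnesia:write(counter, #counter{name = Name, value = V}, write),
        V.
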
- -next_serial(#exchange{name = Name}) -> - Serial = case mnesia:read(rabbit_exchange_serial, Name, write) of - [] -> 1; - [#exchange_serial{serial = S}] -> S + 1 - end, - mnesia:write(rabbit_exchange_serial, - #exchange_serial{name = Name, serial = Serial}, write), - Serial. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 3bd667a5..3c42ed3c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -22,9 +22,9 @@ assert_equivalence/6, assert_args_equivalence/2, check_type/1, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, - publish/2, delete/2, serialise_events/1]). -%% this must be run inside a mnesia tx --export([maybe_auto_delete/1]). + publish/2, delete/2]). +%% these must be run inside a mnesia tx +-export([maybe_auto_delete/1, serial/1]). %%---------------------------------------------------------------------------- @@ -72,10 +72,11 @@ (name(), boolean())-> 'ok' | rabbit_types:error('not_found') | rabbit_types:error('in_use')). --spec(serialise_events/1:: (rabbit_types:exchange()) -> boolean()). -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). +-spec(serial/1:: (rabbit_types:exchange()) -> 'none' | pos_integer()). + -endif. %%---------------------------------------------------------------------------- @@ -298,9 +299,6 @@ delete0(XName, Fun) -> end end). -serialise_events(#exchange{type = XType}) -> - apply(type_to_module(XType), serialise_events, []). - maybe_auto_delete(#exchange{auto_delete = false}) -> not_deleted; maybe_auto_delete(#exchange{auto_delete = true} = X) -> @@ -322,6 +320,21 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. +serial(#exchange{name = XName, type = XType}) -> + case (type_to_module(XType)):serialise_events() of + true -> next_serial(XName); + false -> none + end. + +next_serial(XName) -> + Serial1 = case mnesia:read(rabbit_exchange_serial, XName, write) of + [] -> 1; + [#exchange_serial{serial = Serial}] -> Serial + 1 + end, + mnesia:write(rabbit_exchange_serial, + #exchange_serial{name = XName, serial = Serial1}, write), + Serial1. + %% Used with atoms from records; e.g., the type is expected to exist. type_to_module(T) -> {ok, Module} = rabbit_registry:lookup_module(exchange, T), -- cgit v1.2.1 From 6459424a0c246cd21b64d46d3b16a950487d5ab1 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 21:53:40 +0100 Subject: cosmetic --- src/rabbit_exchange.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 3c42ed3c..ff15ce3a 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -280,12 +280,10 @@ call_with_exchange(XName, Fun) -> end). delete(XName, IfUnused) -> - delete0(XName, case IfUnused of - true -> fun conditional_delete/1; - false -> fun unconditional_delete/1 - end). 
- -delete0(XName, Fun) -> + Fun = case IfUnused of + true -> fun conditional_delete/1; + false -> fun unconditional_delete/1 + end, call_with_exchange( XName, fun (X) -> -- cgit v1.2.1 From 38d77c1bf557479c892c04da8f5f6d390847474c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 22:11:42 +0100 Subject: refactor: make add_notify do what it says (and no more) --- src/rabbit_binding.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1092948a..b6324bb5 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -122,7 +122,9 @@ add(Binding, InnerFun) -> case InnerFun(Src, Dst) of ok -> case mnesia:read({rabbit_route, B}) of - [] -> add_notify(Src, Dst, B); + [] -> ok = sync_binding(B, all_durable([Src, Dst]), + fun mnesia:write/3), + add_notify(Src, B); [_] -> fun rabbit_misc:const_ok/0 end; {error, _} = Err -> @@ -130,12 +132,11 @@ add(Binding, InnerFun) -> end end). -add_notify(Src, Dst, B) -> - ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), - ok = rabbit_exchange:callback(Src, add_binding, [transaction, Src, B]), - Serial = rabbit_exchange:serial(Src), +add_notify(X, B) -> + ok = rabbit_exchange:callback(X, add_binding, [transaction, X, B]), + Serial = rabbit_exchange:serial(X), fun () -> - ok = rabbit_exchange:callback(Src, add_binding, [Serial, Src, B]), + ok = rabbit_exchange:callback(X, add_binding, [Serial, X, B]), ok = rabbit_event:notify(binding_created, info(B)) end. -- cgit v1.2.1 From 1847c3874b081de80ac50225cedd08dca92d0d14 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 28 Mar 2011 22:13:57 +0100 Subject: index the serial dict by XName rather than X ...which should be more efficient --- src/rabbit_binding.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b6324bb5..3bf8e6a9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -408,16 +408,16 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> process_deletions(Deletions) -> Serials = dict:fold( - fun (_XName, {X, Deleted, Bindings}, Acc) -> + fun (XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), pd_callback(transaction, X, Deleted, FlatBindings), - dict:store(X, rabbit_exchange:serial(X), Acc) + dict:store(XName, rabbit_exchange:serial(X), Acc) end, Deletions, dict:new()), fun() -> dict:fold( fun (XName, {X, Deleted, Bindings}, ok) -> FlatBindings = lists:flatten(Bindings), - Serial = dict:fetch(X, Serials), + Serial = dict:fetch(XName, Serials), pd_callback(Serial, X, Deleted, FlatBindings), [rabbit_event:notify(binding_deleted, info(B)) || B <- FlatBindings], -- cgit v1.2.1 From e876d31d003d2a234e9848101ef1545720e25a4e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 29 Mar 2011 10:31:34 +0100 Subject: Fix Matthias' deliberate mistake. 
--- src/rabbit_binding.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 3bf8e6a9..54fa1a63 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -412,7 +412,7 @@ process_deletions(Deletions) -> FlatBindings = lists:flatten(Bindings), pd_callback(transaction, X, Deleted, FlatBindings), dict:store(XName, rabbit_exchange:serial(X), Acc) - end, Deletions, dict:new()), + end, dict:new(), Deletions), fun() -> dict:fold( fun (XName, {X, Deleted, Bindings}, ok) -> @@ -426,7 +426,7 @@ process_deletions(Deletions) -> exchange_deleted, [{name, XName}]); _ -> ok end - end, Deletions, ok) + end, ok, Deletions) end. pd_callback(Arg, X, Deleted, Bindings) -> -- cgit v1.2.1 From 5baea669ec65f80dae2064efa02d039956034575 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 29 Mar 2011 11:25:21 +0100 Subject: Improve documentation of BQ concerning the issuance of confirms --- src/rabbit_backing_queue.erl | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index a15ff846..fe09e400 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -39,13 +39,12 @@ behaviour_info(callbacks) -> %% 2. a boolean indicating whether the queue is durable %% 3. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which accepts a function from - %% state to state and invokes it with the current backing - %% queue state. This is useful for handling events, e.g. when - %% the backing queue does not have its own process to receive - %% such events, or when the processing of an event results in - %% a state transition the queue logic needs to know about - %% (such as messages getting confirmed). + %% 4. an asynchronous callback which accepts a function of type + %% backing-queue-state to backing-queue-state. This callback + %% function can be safely invoked from any process, which + %% makes it useful for passing messages back into the backing + %% queue, especially as the backing queue does not have + %% control of its own mailbox. %% 5. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. {init, 5}, @@ -71,6 +70,31 @@ behaviour_info(callbacks) -> %% Return ids of messages which have been confirmed since %% the last invocation of this function (or initialisation). + %% + %% Message ids should only appear in the result of + %% drain_confirmed under the following circumstances: + %% + %% 1. The message appears in a call to publish_delivered/4 and + %% the first argument (ack_required) is false; or + %% 2. The message is fetched from the queue with fetch/2 and the + %% first argument (ack_required) is false; or + %% 3. The message is acked (ack/2 is called for the message); or + %% 4. The message is fully fsync'd to disk in such a way that the + %% recovery of the message is guaranteed in the event of a + %% crash of this rabbit node (excluding hardware failure). + %% + %% In addition to the above conditions, a message id may only + %% appear in the result of drain_confirmed if + %% #message_properties.needs_confirming = true when the msg was + %% published (through whichever means) to the backing queue. 
+ %% + %% It is legal for the same message id to appear in the results + %% of multiple calls to drain_confirmed, which means that the + %% backing queue is not required to keep track of the which + %% messages it has already confirmed. The confirm will be issued + %% to the publisher the first time the message id appears in the + %% result of drain_confirmed. All subsequent appearances of that + %% message id will be ignored. {drain_confirmed, 1}, %% Drop messages from the head of the queue while the supplied -- cgit v1.2.1 From 95ca5fddb6e4ff306580f2be8945353e52791282 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 29 Mar 2011 11:27:29 +0100 Subject: english --- src/rabbit_backing_queue.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index fe09e400..0ca8d260 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -90,11 +90,11 @@ behaviour_info(callbacks) -> %% %% It is legal for the same message id to appear in the results %% of multiple calls to drain_confirmed, which means that the - %% backing queue is not required to keep track of the which - %% messages it has already confirmed. The confirm will be issued - %% to the publisher the first time the message id appears in the - %% result of drain_confirmed. All subsequent appearances of that - %% message id will be ignored. + %% backing queue is not required to keep track of which messages + %% it has already confirmed. The confirm will be issued to the + %% publisher the first time the message id appears in the result + %% of drain_confirmed. All subsequent appearances of that message + %% id will be ignored. {drain_confirmed, 1}, %% Drop messages from the head of the queue while the supplied -- cgit v1.2.1 From 85c36851e257c454a3e1039160fe7b08eb65f2eb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 29 Mar 2011 11:29:13 +0100 Subject: Don't attempt to provide a serial for the x deleted event, we weren't doing it right anyway. Create the table entry at exchange creation time. --- include/rabbit_exchange_type_spec.hrl | 7 +++--- src/rabbit_binding.erl | 8 +++++-- src/rabbit_exchange.erl | 40 +++++++++++++++++++++-------------- 3 files changed, 34 insertions(+), 21 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 9458d2fe..bb23445d 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -16,17 +16,18 @@ -ifdef(use_specs). --type(serial() :: pos_integer() | 'transaction' | 'none'). +-type(tx() :: 'transaction' | 'none'). +-type(serial() :: pos_integer() | tx()). -spec(description/0 :: () -> [{atom(), any()}]). -spec(serialise_events/0 :: () -> boolean()). -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(create/2 :: (serial(), rabbit_types:exchange()) -> 'ok'). +-spec(create/2 :: (tx(), rabbit_types:exchange()) -> 'ok'). -spec(recover/2 :: (rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(delete/3 :: (serial(), rabbit_types:exchange(), +-spec(delete/3 :: (tx(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(add_binding/3 :: (serial(), rabbit_types:exchange(), rabbit_types:binding()) -> 'ok'). 
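The tx()/serial() split in the spec hunk above means an exchange type sees 'transaction' for the in-transaction invocation and either 'none' or a monotonically increasing integer for the post-commit one. The following minimal sketch of a plugin honouring that contract is not part of this patch series: the module name, the io:format side effects and the reduced callback set are illustrative assumptions only.

-module(rabbit_exchange_type_serial_sketch).

-export([serialise_events/0, create/2, add_binding/3,
         remove_bindings/3, delete/3]).

%% opt in to integer serials for post-commit binding callbacks
serialise_events() -> true.

%% inside the mnesia transaction: keep side effects out
create(transaction, _X) -> ok;
%% after commit
create(none, _X)        -> ok.

add_binding(transaction, _X, _B) -> ok;
add_binding(none, _X, _B)        -> ok;
add_binding(Serial, _X, B) when is_integer(Serial) ->
    %% ordered, post-commit application of the binding
    io:format("applying binding ~p at serial ~p~n", [B, Serial]).

remove_bindings(transaction, _X, _Bs) -> ok;
remove_bindings(none, _X, _Bs)        -> ok;
remove_bindings(Serial, _X, Bs) when is_integer(Serial) ->
    io:format("removing ~p binding(s) at serial ~p~n", [length(Bs), Serial]).

delete(transaction, _X, _Bs) -> ok;
delete(none, _X, _Bs)        -> ok.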
diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 54fa1a63..6384cf0e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -134,7 +134,7 @@ add(Binding, InnerFun) -> add_notify(X, B) -> ok = rabbit_exchange:callback(X, add_binding, [transaction, X, B]), - Serial = rabbit_exchange:serial(X), + Serial = rabbit_exchange:serial(X, binding), fun () -> ok = rabbit_exchange:callback(X, add_binding, [Serial, X, B]), ok = rabbit_event:notify(binding_created, info(B)) @@ -411,7 +411,11 @@ process_deletions(Deletions) -> fun (XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), pd_callback(transaction, X, Deleted, FlatBindings), - dict:store(XName, rabbit_exchange:serial(X), Acc) + dict:store(XName, rabbit_exchange:serial( + X, case Deleted of + deleted -> exchange; + not_deleted -> binding + end), Acc) end, dict:new(), Deletions), fun() -> dict:fold( diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index ff15ce3a..067df560 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -24,7 +24,7 @@ info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). %% these must be run inside a mnesia tx --export([maybe_auto_delete/1, serial/1]). +-export([maybe_auto_delete/1, serial/2]). %%---------------------------------------------------------------------------- @@ -75,7 +75,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(serial/1:: (rabbit_types:exchange()) -> 'none' | pos_integer()). +-spec(serial/2:: (rabbit_types:exchange(), 'exchange' | 'binding') + -> 'none' | pos_integer()). -endif. @@ -86,7 +87,7 @@ recover() -> Xs = rabbit_misc:table_fold( fun (X, Acc) -> - ok = mnesia:write(rabbit_exchange, X, write), + write_exchange(X), [X | Acc] end, [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), @@ -121,7 +122,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> fun () -> case mnesia:wread({rabbit_exchange, XName}) of [] -> - ok = mnesia:write(rabbit_exchange, X, write), + write_exchange(X), ok = case Durable of true -> mnesia:write(rabbit_durable_exchange, X, write); @@ -135,10 +136,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> fun ({new, Exchange}, Tx) -> ok = XT:create(case Tx of true -> transaction; - false -> case XT:serialise_events() of - true -> 0; - false -> none - end + false -> none end, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; @@ -148,6 +146,14 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). +write_exchange(X = #exchange{name = Name, type = XT}) -> + ok = mnesia:write(rabbit_exchange, X, write), + case (type_to_module(XT)):serialise_events() of + true -> S = #exchange_serial{name = Name, serial = 0}, + ok = mnesia:write(rabbit_exchange_serial, S, write); + false -> ok + end. + %% Used with binaries sent over the wire; the type may not exist. check_type(TypeBin) -> case rabbit_registry:binary_to_type(TypeBin) of @@ -318,20 +324,22 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. -serial(#exchange{name = XName, type = XType}) -> +serial(#exchange{}, exchange) -> + none; + +serial(#exchange{name = XName, type = XType}, binding) -> case (type_to_module(XType)):serialise_events() of true -> next_serial(XName); false -> none end. 
next_serial(XName) -> - Serial1 = case mnesia:read(rabbit_exchange_serial, XName, write) of - [] -> 1; - [#exchange_serial{serial = Serial}] -> Serial + 1 - end, - mnesia:write(rabbit_exchange_serial, - #exchange_serial{name = XName, serial = Serial1}, write), - Serial1. + [#exchange_serial{serial = S}] = + mnesia:read(rabbit_exchange_serial, XName, write), + Serial = S + 1, + ok = mnesia:write(rabbit_exchange_serial, + #exchange_serial{name = XName, serial = Serial}, write), + Serial. %% Used with atoms from records; e.g., the type is expected to exist. type_to_module(T) -> -- cgit v1.2.1 From e74c128acbc9d1296469040c4f2139141dcb3e3b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 29 Mar 2011 12:01:30 +0100 Subject: Go back to serial/1. --- src/rabbit_binding.erl | 18 +++++++++++------- src/rabbit_exchange.erl | 10 +++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 6384cf0e..db7b603e 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -134,7 +134,7 @@ add(Binding, InnerFun) -> add_notify(X, B) -> ok = rabbit_exchange:callback(X, add_binding, [transaction, X, B]), - Serial = rabbit_exchange:serial(X, binding), + Serial = rabbit_exchange:serial(X), fun () -> ok = rabbit_exchange:callback(X, add_binding, [Serial, X, B]), ok = rabbit_event:notify(binding_created, info(B)) @@ -411,17 +411,21 @@ process_deletions(Deletions) -> fun (XName, {X, Deleted, Bindings}, Acc) -> FlatBindings = lists:flatten(Bindings), pd_callback(transaction, X, Deleted, FlatBindings), - dict:store(XName, rabbit_exchange:serial( - X, case Deleted of - deleted -> exchange; - not_deleted -> binding - end), Acc) + case Deleted of + deleted -> Acc; + not_deleted -> dict:store(XName, + rabbit_exchange:serial(X), + Acc) + end end, dict:new(), Deletions), fun() -> dict:fold( fun (XName, {X, Deleted, Bindings}, ok) -> FlatBindings = lists:flatten(Bindings), - Serial = dict:fetch(XName, Serials), + Serial = case Deleted of + deleted -> none; + not_deleted -> dict:fetch(XName, Serials) + end, pd_callback(Serial, X, Deleted, FlatBindings), [rabbit_event:notify(binding_deleted, info(B)) || B <- FlatBindings], diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 067df560..1a8d8bed 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -24,7 +24,7 @@ info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). %% these must be run inside a mnesia tx --export([maybe_auto_delete/1, serial/2]). +-export([maybe_auto_delete/1, serial/1]). %%---------------------------------------------------------------------------- @@ -75,8 +75,7 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(serial/2:: (rabbit_types:exchange(), 'exchange' | 'binding') - -> 'none' | pos_integer()). +-spec(serial/1:: (rabbit_types:exchange()) -> 'none' | pos_integer()). -endif. @@ -324,10 +323,7 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. 
-serial(#exchange{}, exchange) -> - none; - -serial(#exchange{name = XName, type = XType}, binding) -> +serial(#exchange{name = XName, type = XType}) -> case (type_to_module(XType)):serialise_events() of true -> next_serial(XName); false -> none -- cgit v1.2.1 From d15c336dce577e7b020df8501316421c478658e7 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 29 Mar 2011 12:14:03 +0100 Subject: Dialyser --- src/rabbit_amqqueue.erl | 4 ++-- src/rabbit_binding.erl | 2 +- src/rabbit_misc.erl | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 167b1a55..e55b0098 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -139,8 +139,8 @@ -spec(internal_delete/1 :: (name()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit() | - fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | - rabbit_types:connection_exit())). + fun (() -> rabbit_types:ok_or_error('not_found') | + rabbit_types:connection_exit())). -spec(run_backing_queue/2 :: (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(run_backing_queue_async/2 :: diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index db7b603e..ed85b9ea 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -77,7 +77,7 @@ (rabbit_types:binding_destination()) -> deletions()). -spec(remove_transient_for_destination/1 :: (rabbit_types:binding_destination()) -> deletions()). --spec(process_deletions/1 :: (deletions()) -> 'ok'). +-spec(process_deletions/1 :: (deletions()) -> fun(() -> 'ok')). -spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). -spec(add_deletion/3 :: (rabbit_exchange:name(), {'undefined' | rabbit_types:exchange(), diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 45f59999..7d3476e6 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -66,7 +66,7 @@ -type(ok_or_error() :: rabbit_types:ok_or_error(any())). -type(thunk(T) :: fun(() -> T)). --type(const(T) :: fun((any()) -> T)). +-type(const(T) :: fun(() -> T)). -type(resource_name() :: binary()). -type(optdef() :: {flag, string()} | {option, string(), any()}). -type(channel_or_connection_exit() -- cgit v1.2.1 From 0c42f486246b1d1b43c44015172682be589128a1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 29 Mar 2011 12:18:51 +0100 Subject: err... --- src/rabbit_binding.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index ed85b9ea..204be5f6 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -77,7 +77,7 @@ (rabbit_types:binding_destination()) -> deletions()). -spec(remove_transient_for_destination/1 :: (rabbit_types:binding_destination()) -> deletions()). --spec(process_deletions/1 :: (deletions()) -> fun(() -> 'ok')). +-spec(process_deletions/1 :: (deletions()) -> rabbit_misc:const('ok')). -spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). 
-spec(add_deletion/3 :: (rabbit_exchange:name(), {'undefined' | rabbit_types:exchange(), -- cgit v1.2.1 From 5fa737c8a73b5f2e64afd6e287c0e855ca7f712a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 29 Mar 2011 14:11:19 +0100 Subject: Avoid using file:write_file --- src/rabbit_misc.erl | 37 +++++++++++++++++++++++++++++++++---- src/rabbit_prelaunch.erl | 3 ++- src/rabbit_tests.erl | 2 +- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 2e9563cf..9156d87e 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -41,6 +41,7 @@ -export([table_fold/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). +-export([write_file/3, run_ok_monad/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). -export([format_stderr/2]). -export([start_applications/1, stop_applications/1]). @@ -62,7 +63,7 @@ -ifdef(use_specs). --export_type([resource_name/0, thunk/1, const/1]). +-export_type([resource_name/0, thunk/1, const/1, ok_monad_fun/0]). -type(ok_or_error() :: rabbit_types:ok_or_error(any())). -type(thunk(T) :: fun(() -> T)). @@ -76,6 +77,8 @@ fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])). -type(graph_edge_fun() :: fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])). +-type(ok_monad_fun() :: + fun((any()) -> 'ok' | rabbit_types:ok_or_error2(any(), any()))). -spec(method_record_type/1 :: (rabbit_framing:amqp_method_record()) -> rabbit_framing:amqp_method_name()). @@ -154,6 +157,9 @@ -spec(read_term_file/1 :: (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())). -spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()). +-spec(write_file/3 :: (file:filename(), boolean(), binary()) -> ok_or_error()). +-spec(run_ok_monad/2 :: ([ok_monad_fun()], any()) -> + rabbit_types:ok_or_error(any())). -spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). @@ -513,8 +519,31 @@ dirty_dump_log1(LH, {K, Terms, BadBytes}) -> read_term_file(File) -> file:consult(File). write_term_file(File, Terms) -> - file:write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) || - Term <- Terms])). + write_file(File, false, list_to_binary([io_lib:format("~w.~n", [Term]) || + Term <- Terms])). + +write_file(Path, Append, Binary) when is_binary(Binary) -> + Modes = [binary, write, raw | case Append of + true -> [read]; + false -> [] + end], + run_ok_monad( + [fun (ok) -> file:open(Path, Modes) end, + fun (Hdl) -> run_ok_monad( + [fun (ok) -> file:position(Hdl, eof) end, + fun (_Pos) -> file:write(Hdl, Binary) end, + fun (_Pos) -> file:sync(Hdl) end, + fun (_Pos) -> file:close(Hdl) end], ok) + end], ok). + +run_ok_monad([], _State) -> + ok; +run_ok_monad([Fun|Funs], State) -> + case Fun(State) of + ok -> run_ok_monad(Funs, State); + {ok, State1} -> run_ok_monad(Funs, State1); + {error, _Err} = Error -> Error + end. append_file(File, Suffix) -> case file:read_file_info(File) of @@ -532,7 +561,7 @@ append_file(File, 0, Suffix) -> end; append_file(File, _, Suffix) -> case file:read_file(File) of - {ok, Data} -> file:write_file([File, Suffix], Data, [append]); + {ok, Data} -> write_file(File ++ Suffix, true, Data); Error -> Error end. 
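The run_ok_monad/2 helper added above threads a state value through a list of funs: a fun returning ok keeps the current state, {ok, NewState} replaces it, and any {error, _} short-circuits the chain. A small usage sketch follows; it assumes a node running the patched rabbit_misc, and the module and file names are made up for illustration.

-module(run_ok_monad_sketch).

-export([copy_head/2]).

%% Copy the first (up to) 64 bytes of Src into Dst, aborting on the
%% first error anywhere in the chain.
copy_head(Src, Dst) ->
    rabbit_misc:run_ok_monad(
      [fun (ok)    -> file:open(Src, [read, raw, binary]) end,
       fun (Hdl)   -> Res = file:read(Hdl, 64),
                      ok = file:close(Hdl),
                      case Res of
                          {ok, Bytes} -> {ok, Bytes};
                          eof         -> {ok, <<>>};
                          Error       -> Error
                      end
       end,
       fun (Bytes) -> rabbit_misc:write_file(Dst, false, Bytes) end],
      ok).

Since run_ok_monad/2 returns ok on success and the first {error, Reason} otherwise, the final write needs no extra wrapping.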
diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8800e8d6..9874ba03 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -67,7 +67,8 @@ start() -> AppVersions}, %% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel - file:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), + rabbit_misc:write_file(RootName ++ ".rel", false, + list_to_binary(io_lib:format("~p.~n", [RDesc]))), %% Compile the script ScriptFile = RootName ++ ".script", diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ca046c91..f4376293 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1634,7 +1634,7 @@ test_file_handle_cache() -> [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]], Content = <<"foo">>, CopyFun = fun (Src, Dst) -> - ok = file:write_file(Src, Content), + ok = rabbit_misc:write_file(Src, false, Content), {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), Size = size(Content), -- cgit v1.2.1 From 254a2dadf6814b782b7debfd1e3fe95f8e17739f Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 29 Mar 2011 16:03:36 +0100 Subject: cosmetic --- src/rabbit_control.erl | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8364ecd8..571eb5e4 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -382,12 +382,9 @@ rpc_call(Node, Mod, Fun, Args) -> %% characters. We don't escape characters above 127, since they may %% form part of UTF-8 strings. -escape(Atom) when is_atom(Atom) -> - escape(atom_to_list(Atom)); -escape(Bin) when is_binary(Bin) -> - escape(binary_to_list(Bin)); -escape(L) when is_list(L) -> - escape_char(lists:reverse(L), []). +escape(Atom) when is_atom(Atom) -> escape(atom_to_list(Atom)); +escape(Bin) when is_binary(Bin) -> escape(binary_to_list(Bin)); +escape(L) when is_list(L) -> escape_char(lists:reverse(L), []). escape_char([$\\ | T], Acc) -> escape_char(T, [$\\, $\\ | Acc]); @@ -402,19 +399,15 @@ escape_char([], Acc) -> prettify_amqp_table(Table) -> [{escape(K), prettify_typed_amqp_value(T, V)} || {K, T, V} <- Table]. -prettify_typed_amqp_value(Type, Value) -> - case Type of - longstr -> escape(Value); - table -> prettify_amqp_table(Value); - array -> [prettify_typed_amqp_value(T, V) || {T, V} <- Value]; - _ -> Value - end. +prettify_typed_amqp_value(longstr, Value) -> escape(Value); +prettify_typed_amqp_value(table, Value) -> prettify_amqp_table(Value); +prettify_typed_amqp_value(array, Value) -> [prettify_typed_amqp_value(T, V) || + {T, V} <- Value]; +prettify_typed_amqp_value(_Type, Value) -> Value. %% the slower shutdown on windows required to flush stdout quit(Status) -> case os:type() of - {unix, _} -> - halt(Status); - {win32, _} -> - init:stop(Status) + {unix, _} -> halt(Status); + {win32, _} -> init:stop(Status) end. -- cgit v1.2.1 From 8196a4f395d2a95d6796fc1d6ce3dbbb529c8be3 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:08:50 +0100 Subject: Slightly better explanation for some epmd errors. --- src/rabbit_prelaunch.erl | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8800e8d6..0b058f76 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -16,7 +16,7 @@ -module(rabbit_prelaunch). --export([start/0, stop/0]). +-export([start/0, stop/0, duplicate_node_check/1]). 
-define(BaseApps, [rabbit]). -define(ERROR_CODE, 1). @@ -258,8 +258,19 @@ duplicate_node_check(NodeStr) -> terminate(?ERROR_CODE); false -> ok end; - {error, EpmdReason} -> terminate("unexpected epmd error: ~p~n", - [EpmdReason]) + {error, EpmdReason} -> + Tip = case EpmdReason of + address -> + io_lib:format("(Unable to connect to epmd on host " ++ + "~p using tcp port 4369.)", + [NodeHost]); + nxdomain -> + io_lib:format("(Can't resolve host ~p.)", + [NodeHost]); + _ -> [] + end, + terminate("unexpected epmd error: ~p ~s~n", + [EpmdReason, Tip]) end. terminate(Fmt, Args) -> -- cgit v1.2.1 From b88e25d8306d395e6943aa1a5bc3cda62d189318 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:31:46 +0100 Subject: Matthias doesn't like mentioning the port number. --- src/rabbit_prelaunch.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 0b058f76..d8cb2918 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -261,8 +261,8 @@ duplicate_node_check(NodeStr) -> {error, EpmdReason} -> Tip = case EpmdReason of address -> - io_lib:format("(Unable to connect to epmd on host " ++ - "~p using tcp port 4369.)", + io_lib:format("(Unable to connect to epmd on " ++ + "host ~p.)", [NodeHost]); nxdomain -> io_lib:format("(Can't resolve host ~p.)", -- cgit v1.2.1 From 6f45978568372304acee4e138cbac2da59739970 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:36:55 +0100 Subject: cosmetic --- src/rabbit_prelaunch.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index d8cb2918..078ac338 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -261,9 +261,8 @@ duplicate_node_check(NodeStr) -> {error, EpmdReason} -> Tip = case EpmdReason of address -> - io_lib:format("(Unable to connect to epmd on " ++ - "host ~p.)", - [NodeHost]); + io_lib:format("(Unable to connect to epmd on " + "host ~p.)", [NodeHost]); nxdomain -> io_lib:format("(Can't resolve host ~p.)", [NodeHost]); -- cgit v1.2.1 From 8678272e1759f1fe7467582acf58830f1e28a549 Mon Sep 17 00:00:00 2001 From: Marek Majkowski Date: Tue, 29 Mar 2011 17:52:23 +0100 Subject: Finishing touches. --- src/rabbit_prelaunch.erl | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 078ac338..c8ad7c9c 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -259,17 +259,12 @@ duplicate_node_check(NodeStr) -> false -> ok end; {error, EpmdReason} -> - Tip = case EpmdReason of - address -> - io_lib:format("(Unable to connect to epmd on " - "host ~p.)", [NodeHost]); - nxdomain -> - io_lib:format("(Can't resolve host ~p.)", - [NodeHost]); - _ -> [] - end, - terminate("unexpected epmd error: ~p ~s~n", - [EpmdReason, Tip]) + terminate("epmd error for host ~p: ~p (~s)~n", + [NodeHost, EpmdReason, + case EpmdReason of + address -> "unable to establish tcp connection"; + _ -> inet:format_error(EpmdReason) + end]) end. 
terminate(Fmt, Args) -> -- cgit v1.2.1 From 02e0065a00a7e131a6293de2a4d843b2e8367c05 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 29 Mar 2011 19:17:57 +0100 Subject: shrinkage: one dict is better than two --- src/rabbit_binding.erl | 54 ++++++++++++++++++++------------------------------ 1 file changed, 22 insertions(+), 32 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 204be5f6..d1e6f220 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -407,38 +407,28 @@ merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> [Bindings1 | Bindings2]}. process_deletions(Deletions) -> - Serials = dict:fold( - fun (XName, {X, Deleted, Bindings}, Acc) -> - FlatBindings = lists:flatten(Bindings), - pd_callback(transaction, X, Deleted, FlatBindings), - case Deleted of - deleted -> Acc; - not_deleted -> dict:store(XName, - rabbit_exchange:serial(X), - Acc) - end - end, dict:new(), Deletions), + AugmentedDeletions = + dict:map(fun (_XName, {X, deleted, Bindings}) -> + Bs = lists:flatten(Bindings), + pd_callback(transaction, X, delete, Bs), + {X, deleted, Bs, none}; + (_XName, {X, not_deleted, Bindings}) -> + Bs = lists:flatten(Bindings), + pd_callback(transaction, X, remove_bindings, Bs), + {X, not_deleted, Bs, rabbit_exchange:serial(X)} + end, Deletions), fun() -> - dict:fold( - fun (XName, {X, Deleted, Bindings}, ok) -> - FlatBindings = lists:flatten(Bindings), - Serial = case Deleted of - deleted -> none; - not_deleted -> dict:fetch(XName, Serials) - end, - pd_callback(Serial, X, Deleted, FlatBindings), - [rabbit_event:notify(binding_deleted, info(B)) || - B <- FlatBindings], - case Deleted of - deleted -> ok = rabbit_event:notify( - exchange_deleted, [{name, XName}]); - _ -> ok - end - end, ok, Deletions) + dict:fold(fun (XName, {X, deleted, Bs, Serial}, ok) -> + ok = rabbit_event:notify( + exchange_deleted, [{name, XName}]), + del_notify(Bs), + pd_callback(Serial, X, delete, Bs); + (_XName, {X, not_deleted, Bs, Serial}, ok) -> + del_notify(Bs), + pd_callback(Serial, X, remove_bindings, Bs) + end, ok, AugmentedDeletions) end. -pd_callback(Arg, X, Deleted, Bindings) -> - ok = rabbit_exchange:callback(X, case Deleted of - not_deleted -> remove_bindings; - deleted -> delete - end, [Arg, X, Bindings]). +del_notify(Bs) -> [rabbit_event:notify(binding_deleted, info(B)) || B <- Bs]. + +pd_callback(Arg, X, F, Bs) -> ok = rabbit_exchange:callback(X, F, [Arg, X, Bs]). 
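The shape of the refactored process_deletions/1 above — perform the transactional callbacks and capture serials while still inside mnesia, then return a thunk that does the notifications after commit — is easy to miss in the diff. Here is a standalone sketch of that two-phase pattern, using stand-in data rather than real deletions() values; it is illustrative only and not part of the patch.

-module(deferred_notify_sketch).

-export([run/1]).

%% Deletions is a dict() of Name -> Bindings (stand-in values).
run(Deletions) ->
    Augmented = dict:map(fun (_Name, Bindings) ->
                                 %% "transactional" phase: pure bookkeeping
                                 {length(Bindings), Bindings}
                         end, Deletions),
    %% return a thunk; the caller invokes it only once the tx has committed
    fun () ->
            dict:fold(fun (Name, {Count, _Bindings}, ok) ->
                              io:format("~p: ~p binding(s) removed~n",
                                        [Name, Count]),
                              ok
                      end, ok, Augmented)
    end.

Calling (deferred_notify_sketch:run(D))() only after the surrounding transaction has committed mirrors how the returned fun is meant to be used by rabbit_misc:execute_mnesia_tx_with_tail/1.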
-- cgit v1.2.1 From 8fd1963f80794083d1e898436c137fa5cf9c21c0 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 29 Mar 2011 20:58:52 +0100 Subject: remove R13isms --- src/gm.erl | 3 ++- src/gm_soak_test.erl | 7 ++++--- src/rabbit_amqqueue_process.erl | 14 +++++++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 5b3623cf..1edcde11 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -516,7 +516,8 @@ flush(Server) -> init([GroupName, Module, Args]) -> - random:seed(now()), + {MegaSecs, Secs, MicroSecs} = now(), + random:seed(MegaSecs, Secs, MicroSecs), gen_server2:cast(self(), join), Self = self(), {ok, #state { self = Self, diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl index 1f8832a6..dae42ac7 100644 --- a/src/gm_soak_test.erl +++ b/src/gm_soak_test.erl @@ -35,7 +35,7 @@ with_state(Fun) -> inc() -> case 1 + get(count) of - 100000 -> Now = os:timestamp(), + 100000 -> Now = now(), Start = put(ts, Now), Diff = timer:now_diff(Now, Start), Rate = 100000 / (Diff / 1000000), @@ -48,7 +48,7 @@ joined([], Members) -> io:format("Joined ~p (~p members)~n", [self(), length(Members)]), put(state, dict:from_list([{Member, empty} || Member <- Members])), put(count, 0), - put(ts, os:timestamp()), + put(ts, now()), ok. members_changed([], Births, Deaths) -> @@ -101,7 +101,8 @@ terminate([], Reason) -> spawn_member() -> spawn_link( fun () -> - random:seed(now()), + {MegaSecs, Secs, MicroSecs} = now(), + random:seed(MegaSecs, Secs, MicroSecs), %% start up delay of no more than 10 seconds timer:sleep(random:uniform(10000)), {ok, Pid} = gm:start_link(?MODULE, ?MODULE, []), diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 3f5758ce..2b0fe17e 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -428,11 +428,19 @@ confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> {CMs, MTC0} end end, {gb_trees:empty(), MTC}, MsgIds), - gb_trees:map(fun(ChPid, MsgSeqNos) -> - rabbit_channel:confirm(ChPid, MsgSeqNos) - end, CMs), + gb_trees_foreach(fun(ChPid, MsgSeqNos) -> + rabbit_channel:confirm(ChPid, MsgSeqNos) + end, CMs), State#q{msg_id_to_channel = MTC1}. +gb_trees_foreach(_, none) -> + ok; +gb_trees_foreach(Fun, {Key, Val, It}) -> + Fun(Key, Val), + gb_trees_foreach(Fun, gb_trees:next(It)); +gb_trees_foreach(Fun, Tree) -> + gb_trees_foreach(Fun, gb_trees:next(gb_trees:iterator(Tree))). + gb_trees_cons(Key, Value, Tree) -> case gb_trees:lookup(Key, Tree) of {value, Values} -> gb_trees:update(Key, [Value | Values], Tree); -- cgit v1.2.1 From 81e65665ede1096e482215d4c9c516f6a8e81c2d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:20:43 +0100 Subject: test refactor: mock the writer only once --- src/rabbit_tests.erl | 44 +++++++++++--------------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ca046c91..be868215 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1178,9 +1178,15 @@ test_server_status() -> passed. -test_spawn(Receiver) -> +test_writer(Pid) -> + receive + shutdown -> ok; + {send_command, Method} -> Pid ! Method, test_writer(Pid) + end. 
+ +test_spawn() -> Me = self(), - Writer = spawn(fun () -> Receiver(Me) end), + Writer = spawn(fun () -> test_writer(Me) end), {ok, Ch} = rabbit_channel:start_link( 1, Me, Writer, Me, rabbit_framing_amqp_0_9_1, user(<<"guest">>), <<"/">>, [], self(), @@ -1198,15 +1204,6 @@ user(Username) -> impl = #internal_user{username = Username, is_admin = true}}. -test_statistics_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_statistics_receiver(Pid) - end. - test_statistics_event_receiver(Pid) -> receive Foo -> @@ -1228,17 +1225,8 @@ test_statistics_receive_event1(Ch, Matcher) -> after 1000 -> throw(failed_to_receive_event) end. -test_confirms_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_confirms_receiver(Pid) - end. - test_confirms() -> - {_Writer, Ch} = test_spawn(fun test_confirms_receiver/1), + {_Writer, Ch} = test_spawn(), DeclareBindDurableQueue = fun() -> rabbit_channel:do(Ch, #'queue.declare'{durable = true}), @@ -1311,7 +1299,7 @@ test_statistics() -> %% by far the most complex code though. %% Set up a channel and queue - {_Writer, Ch} = test_spawn(fun test_statistics_receiver/1), + {_Writer, Ch} = test_spawn(), rabbit_channel:do(Ch, #'queue.declare'{}), QName = receive #'queue.declare_ok'{queue = Q0} -> Q0 @@ -1462,18 +1450,8 @@ test_delegates_sync(SecondaryNode) -> passed. -test_queue_cleanup_receiver(Pid) -> - receive - shutdown -> - ok; - {send_command, Method} -> - Pid ! Method, - test_queue_cleanup_receiver(Pid) - end. - - test_queue_cleanup(_SecondaryNode) -> - {_Writer, Ch} = test_spawn(fun test_queue_cleanup_receiver/1), + {_Writer, Ch} = test_spawn(), rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }), receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} -> ok -- cgit v1.2.1 From 88f1f2f23d4cc3cbd4612f72d1cd40830aa2c8b6 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:21:21 +0100 Subject: cosmetic --- src/rabbit_tests.erl | 130 ++++++++++++++++++++++++--------------------------- 1 file changed, 60 insertions(+), 70 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index be868215..ea7d1343 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -598,39 +598,37 @@ test_topic_matching() -> exchange_op_callback(X, create, []), %% add some bindings - Bindings = lists:map( - fun ({Key, Q}) -> - #binding{source = XName, - key = list_to_binary(Key), - destination = #resource{virtual_host = <<"/">>, - kind = queue, - name = list_to_binary(Q)}} - end, [{"a.b.c", "t1"}, - {"a.*.c", "t2"}, - {"a.#.b", "t3"}, - {"a.b.b.c", "t4"}, - {"#", "t5"}, - {"#.#", "t6"}, - {"#.b", "t7"}, - {"*.*", "t8"}, - {"a.*", "t9"}, - {"*.b.c", "t10"}, - {"a.#", "t11"}, - {"a.#.#", "t12"}, - {"b.b.c", "t13"}, - {"a.b.b", "t14"}, - {"a.b", "t15"}, - {"b.c", "t16"}, - {"", "t17"}, - {"*.*.*", "t18"}, - {"vodka.martini", "t19"}, - {"a.b.c", "t20"}, - {"*.#", "t21"}, - {"#.*.#", "t22"}, - {"*.#.#", "t23"}, - {"#.#.#", "t24"}, - {"*", "t25"}, - {"#.b.#", "t26"}]), + Bindings = [#binding{source = XName, + key = list_to_binary(Key), + destination = #resource{virtual_host = <<"/">>, + kind = queue, + name = list_to_binary(Q)}} || + {Key, Q} <- [{"a.b.c", "t1"}, + {"a.*.c", "t2"}, + {"a.#.b", "t3"}, + {"a.b.b.c", "t4"}, + {"#", "t5"}, + {"#.#", "t6"}, + {"#.b", "t7"}, + {"*.*", "t8"}, + {"a.*", "t9"}, + {"*.b.c", "t10"}, + {"a.#", "t11"}, + {"a.#.#", "t12"}, + {"b.b.c", "t13"}, + {"a.b.b", "t14"}, + {"a.b", "t15"}, + {"b.c", "t16"}, + {"", 
"t17"}, + {"*.*.*", "t18"}, + {"vodka.martini", "t19"}, + {"a.b.c", "t20"}, + {"*.#", "t21"}, + {"#.*.#", "t22"}, + {"*.#.#", "t23"}, + {"#.#.#", "t24"}, + {"*", "t25"}, + {"#.b.#", "t26"}]], lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), @@ -669,22 +667,23 @@ test_topic_matching() -> ordsets:from_list(RemovedBindings))), %% test some matches - test_topic_expect_match(X, - [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", - "t23", "t24", "t26"]}, - {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", - "t22", "t23", "t24", "t26"]}, - {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", - "t23", "t24", "t26"]}, - {"", ["t6", "t17", "t24"]}, - {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, - {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, - {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, - {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, - {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", - "t24", "t26"]}, - {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, - {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), + test_topic_expect_match( + X, + [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22", + "t23", "t24", "t26"]}, + {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15", + "t22", "t23", "t24", "t26"]}, + {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22", + "t23", "t24", "t26"]}, + {"", ["t6", "t17", "t24"]}, + {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]}, + {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]}, + {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]}, + {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]}, + {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23", + "t24", "t26"]}, + {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]}, + {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]), %% remove the entire exchange exchange_op_callback(X, delete, [RemainingBindings]), @@ -1206,9 +1205,7 @@ user(Username) -> test_statistics_event_receiver(Pid) -> receive - Foo -> - Pid ! Foo, - test_statistics_event_receiver(Pid) + Foo -> Pid ! Foo, test_statistics_event_receiver(Pid) end. test_statistics_receive_event(Ch, Matcher) -> @@ -1252,10 +1249,9 @@ test_confirms() -> QPid1 = Q1#amqqueue.pid, %% Enable confirms rabbit_channel:do(Ch, #'confirm.select'{}), - receive #'confirm.select_ok'{} -> - ok - after 1000 -> - throw(failed_to_enable_confirms) + receive + #'confirm.select_ok'{} -> ok + after 1000 -> throw(failed_to_enable_confirms) end, %% Publish a message rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>, @@ -1267,25 +1263,19 @@ test_confirms() -> QPid1 ! 
boom, %% Wait for a nack receive - #'basic.nack'{} -> - ok; - #'basic.ack'{} -> - throw(received_ack_instead_of_nack) - after 2000 -> - throw(did_not_receive_nack) + #'basic.nack'{} -> ok; + #'basic.ack'{} -> throw(received_ack_instead_of_nack) + after 2000 -> throw(did_not_receive_nack) end, receive - #'basic.ack'{} -> - throw(received_ack_when_none_expected) - after 1000 -> - ok + #'basic.ack'{} -> throw(received_ack_when_none_expected) + after 1000 -> ok end, %% Cleanup rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}), - receive #'queue.delete_ok'{} -> - ok - after 1000 -> - throw(failed_to_cleanup_queue) + receive + #'queue.delete_ok'{} -> ok + after 1000 -> throw(failed_to_cleanup_queue) end, unlink(Ch), ok = rabbit_channel:shutdown(Ch), -- cgit v1.2.1 From 713ee0002753f3278b1a4198f7d7101a2e820007 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:27:20 +0100 Subject: consistency --- src/rabbit_exchange.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 1a8d8bed..5f2d63f6 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -104,8 +104,8 @@ recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> recover_with_bindings([], [], []) -> ok. -callback(#exchange{type = XType}, Fun, Args) -> - apply(type_to_module(XType), Fun, Args). +callback(#exchange{type = Type}, Fun, Args) -> + apply(type_to_module(Type), Fun, Args). declare(XName, Type, Durable, AutoDelete, Internal, Args) -> X = #exchange{name = XName, @@ -145,9 +145,9 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). -write_exchange(X = #exchange{name = Name, type = XT}) -> +write_exchange(X = #exchange{name = Name, type = Type}) -> ok = mnesia:write(rabbit_exchange, X, write), - case (type_to_module(XT)):serialise_events() of + case (type_to_module(Type)):serialise_events() of true -> S = #exchange_serial{name = Name, serial = 0}, ok = mnesia:write(rabbit_exchange_serial, S, write); false -> ok @@ -323,8 +323,8 @@ unconditional_delete(X = #exchange{name = XName}) -> Bindings = rabbit_binding:remove_for_source(XName), {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}. -serial(#exchange{name = XName, type = XType}) -> - case (type_to_module(XType)):serialise_events() of +serial(#exchange{name = XName, type = Type}) -> + case (type_to_module(Type)):serialise_events() of true -> next_serial(XName); false -> none end. -- cgit v1.2.1 From 3418e5cf5e1de222d19c1984e9cad0fac2a90bf1 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:40:55 +0100 Subject: simplify: rabbit_misc:const/1 and thunk/1 types are identical get rid of former --- src/rabbit_binding.erl | 6 +++--- src/rabbit_misc.erl | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index d1e6f220..534233c1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -44,9 +44,9 @@ rabbit_types:exchange() | rabbit_types:amqqueue()) -> rabbit_types:ok_or_error(rabbit_types:amqp_error()))). -type(bindings() :: [rabbit_types:binding()]). --type(add_res() :: bind_res() | rabbit_misc:const(bind_res())). +-type(add_res() :: bind_res() | rabbit_misc:thunk(bind_res())). -type(bind_or_error() :: bind_res() | rabbit_types:error('binding_not_found')). --type(remove_res() :: bind_or_error() | rabbit_misc:const(bind_or_error())). +-type(remove_res() :: bind_or_error() | rabbit_misc:thunk(bind_or_error())). 
-opaque(deletions() :: dict()). @@ -77,7 +77,7 @@ (rabbit_types:binding_destination()) -> deletions()). -spec(remove_transient_for_destination/1 :: (rabbit_types:binding_destination()) -> deletions()). --spec(process_deletions/1 :: (deletions()) -> rabbit_misc:const('ok')). +-spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')). -spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()). -spec(add_deletion/3 :: (rabbit_exchange:name(), {'undefined' | rabbit_types:exchange(), diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 7d3476e6..ad59edae 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -62,11 +62,10 @@ -ifdef(use_specs). --export_type([resource_name/0, thunk/1, const/1]). +-export_type([resource_name/0, thunk/1]). -type(ok_or_error() :: rabbit_types:ok_or_error(any())). -type(thunk(T) :: fun(() -> T)). --type(const(T) :: fun(() -> T)). -type(resource_name() :: binary()). -type(optdef() :: {flag, string()} | {option, string(), any()}). -type(channel_or_connection_exit() @@ -192,7 +191,7 @@ -spec(now_ms/0 :: () -> non_neg_integer()). -spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')). -spec(const_ok/0 :: () -> 'ok'). --spec(const/1 :: (A) -> const(A)). +-spec(const/1 :: (A) -> thunk(A)). -spec(ntoa/1 :: (inet:ip_address()) -> string()). -spec(ntoab/1 :: (inet:ip_address()) -> string()). -spec(is_process_alive/1 :: (pid()) -> boolean()). -- cgit v1.2.1 From cb6a966ab0fe888214e1353f5f4035810bae8d5a Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 07:43:22 +0100 Subject: cosmetic --- src/rabbit_tests.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 5a37c31a..55306657 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -692,10 +692,10 @@ test_topic_matching() -> test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]), passed. -exchange_op_callback(X, Fun, ExtraArgs) -> +exchange_op_callback(X, Fun, Args) -> rabbit_misc:execute_mnesia_transaction( - fun () -> rabbit_exchange:callback(X, Fun, [transaction, X] ++ ExtraArgs) end), - rabbit_exchange:callback(X, Fun, [none, X] ++ ExtraArgs). + fun () -> rabbit_exchange:callback(X, Fun, [transaction, X] ++ Args) end), + rabbit_exchange:callback(X, Fun, [none, X] ++ Args). test_topic_expect_match(X, List) -> lists:foreach( -- cgit v1.2.1 From d8f2e891ce8c4a42184e084b582724546c379495 Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Wed, 30 Mar 2011 11:47:30 +0100 Subject: adding reporting exception to supervisor2 --- src/supervisor2.erl | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 1a240856..2c0874ab 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,6 +38,10 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% +%% 5) Added an exception to reporting: If a child has MaxR = 0 and it +%% terminates with reason {shutdown, _}, then supervisor2 behaves +%% as supervisor *except* it does not report anything to error_logger. +%% %% All modifications are (C) 2010-2011 VMware, Inc. 
%% %% %CopyrightBegin% @@ -542,8 +546,7 @@ do_restart({RestartType, Delay}, Reason, Child, State) -> {ok, state_del_child(Child, NState)} end; do_restart(permanent, Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State); + maybe_report_and_restart(Reason, Child, State); do_restart(intrinsic, normal, Child, State) -> {shutdown, state_del_child(Child, State)}; do_restart(intrinsic, shutdown, Child = #child{child_type = supervisor}, @@ -557,13 +560,24 @@ do_restart(_, shutdown, Child, State) -> {ok, NState}; do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State); + maybe_report_and_restart(Reason, Child, State); do_restart(temporary, Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), + maybe_report(Reason, Child, State), NState = state_del_child(Child, State), {ok, NState}. +maybe_report_and_restart({shutdown, _}, Child, State = #state{intensity = 0}) -> + {terminate, NState} = add_restart(State), + {shutdown, state_del_child(Child, NState)}; +maybe_report_and_restart(Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State). + +maybe_report({shutdown, _}, _Child, #state{intensity = 0}) -> + ok; +maybe_report(Reason, Child, State) -> + report_error(child_terminated, Reason, Child, State#state.name). + restart(Child, State) -> case add_restart(State) of {ok, NState} -> -- cgit v1.2.1 From 77d54c95c8e90f1329a5f876497db617921a8ae2 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 12:11:06 +0100 Subject: cosmetic --- src/rabbit_exchange.erl | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 5f2d63f6..dfcb05d6 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -84,15 +84,12 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_fold( - fun (X, Acc) -> - write_exchange(X), - [X | Acc] - end, [], rabbit_durable_exchange), + Xs = rabbit_misc:table_fold(fun (X, Acc) -> store(X), [X | Acc] end, + [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), - recover_with_bindings( - lists:keysort(#binding.source, Bs), - lists:keysort(#exchange.name, Xs), []). + recover_with_bindings(lists:keysort(#binding.source, Bs), + lists:keysort(#exchange.name, Xs), + []). recover_with_bindings([B = #binding{source = XName} | Rest], Xs = [#exchange{name = XName} | _], @@ -121,7 +118,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> fun () -> case mnesia:wread({rabbit_exchange, XName}) of [] -> - write_exchange(X), + store(X), ok = case Durable of true -> mnesia:write(rabbit_durable_exchange, X, write); @@ -145,7 +142,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). 
-write_exchange(X = #exchange{name = Name, type = Type}) -> +store(X = #exchange{name = Name, type = Type}) -> ok = mnesia:write(rabbit_exchange, X, write), case (type_to_module(Type)):serialise_events() of true -> S = #exchange_serial{name = Name, serial = 0}, -- cgit v1.2.1 From 49444ced98f0bad66a788db1b0e10773ac346b9e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 12:15:20 +0100 Subject: cosmetic --- src/rabbit_binding.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 534233c1..2439d455 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -410,11 +410,11 @@ process_deletions(Deletions) -> AugmentedDeletions = dict:map(fun (_XName, {X, deleted, Bindings}) -> Bs = lists:flatten(Bindings), - pd_callback(transaction, X, delete, Bs), + x_callback(transaction, X, delete, Bs), {X, deleted, Bs, none}; (_XName, {X, not_deleted, Bindings}) -> Bs = lists:flatten(Bindings), - pd_callback(transaction, X, remove_bindings, Bs), + x_callback(transaction, X, remove_bindings, Bs), {X, not_deleted, Bs, rabbit_exchange:serial(X)} end, Deletions), fun() -> @@ -422,13 +422,13 @@ process_deletions(Deletions) -> ok = rabbit_event:notify( exchange_deleted, [{name, XName}]), del_notify(Bs), - pd_callback(Serial, X, delete, Bs); + x_callback(Serial, X, delete, Bs); (_XName, {X, not_deleted, Bs, Serial}, ok) -> del_notify(Bs), - pd_callback(Serial, X, remove_bindings, Bs) + x_callback(Serial, X, remove_bindings, Bs) end, ok, AugmentedDeletions) end. del_notify(Bs) -> [rabbit_event:notify(binding_deleted, info(B)) || B <- Bs]. -pd_callback(Arg, X, F, Bs) -> ok = rabbit_exchange:callback(X, F, [Arg, X, Bs]). +x_callback(Arg, X, F, Bs) -> ok = rabbit_exchange:callback(X, F, [Arg, X, Bs]). -- cgit v1.2.1 From 92e9e3748a98d2f76f22cabae854772c168c5637 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 30 Mar 2011 12:36:38 +0100 Subject: Shutup, dialyzer --- src/rabbit_tests.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index ea7d1343..fb1c9a34 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -700,9 +700,14 @@ test_topic_expect_match(X, List) -> lists:foreach( fun ({Key, Expected}) -> BinKey = list_to_binary(Key), + Message = rabbit_basic:message(X#exchange.name, BinKey, + #'P_basic'{}, <<>>), Res = rabbit_exchange_type_topic:route( - X, #delivery{message = #basic_message{routing_keys = - [BinKey]}}), + X, #delivery{mandatory = false, + immediate = false, + txn = none, + sender = self(), + message = Message}), ExpectedRes = lists:map( fun (Q) -> #resource{virtual_host = <<"/">>, kind = queue, -- cgit v1.2.1 From 1312b972cf32478b68223af795a40c979e65b4d3 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 30 Mar 2011 12:45:54 +0100 Subject: cosmetic: put 'rabbitmqctl wait' code in the right place --- src/rabbit_control.erl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 571eb5e4..4a2858f0 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -127,6 +127,8 @@ usage() -> io:format("~s", [rabbit_ctl_usage:usage()]), quit(1). 
+%%---------------------------------------------------------------------------- + action(stop, Node, [], _Opts, Inform) -> Inform("Stopping and halting node ~p", [Node]), call(Node, {rabbit, stop_and_halt, []}); @@ -159,6 +161,10 @@ action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> [Node, ClusterNodes]), rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]); +action(wait, Node, [], _Opts, Inform) -> + Inform("Waiting for ~p", [Node]), + wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). + action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), case call(Node, {rabbit, status, []}) of @@ -294,9 +300,7 @@ action(list_permissions, Node, [], Opts, Inform) -> display_list(call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]})); -action(wait, Node, [], _Opts, Inform) -> - Inform("Waiting for ~p", [Node]), - wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). +%%---------------------------------------------------------------------------- wait_for_application(Node, Attempts) -> case rpc_call(Node, application, which_applications, [infinity]) of -- cgit v1.2.1 From a4c348672a43acd05303a38af8c08196924fb650 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:38:07 +0100 Subject: Make rabbitmq-server compilable. --- src/rabbit_control.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 4a2858f0..6fb465b5 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -163,7 +163,7 @@ action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) -> action(wait, Node, [], _Opts, Inform) -> Inform("Waiting for ~p", [Node]), - wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS). + wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS); action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), @@ -298,7 +298,7 @@ action(list_permissions, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), display_list(call(Node, {rabbit_auth_backend_internal, - list_vhost_permissions, [VHost]})); + list_vhost_permissions, [VHost]})). %%---------------------------------------------------------------------------- -- cgit v1.2.1 From f5638b24b218c063a93a055b426f079dda5c8c88 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:50:36 +0100 Subject: Mark network connections as network. 
--- src/rabbit_reader.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 609bb43f..ff0e9269 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -681,7 +681,7 @@ handle_method0(#'connection.open'{virtual_host = VHostPath}, State#v1{connection_state = running, connection = NewConnection}), rabbit_event:notify(connection_created, - infos(?CREATION_EVENT_KEYS, State1)), + [{type, network}|infos(?CREATION_EVENT_KEYS, State1)]), rabbit_event:if_enabled(StatsTimer, fun() -> internal_emit_stats(State1) end), State1; -- cgit v1.2.1 From d4b7b576f88362839631499a19b6f9695ac03cdf Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 13:55:33 +0100 Subject: Add space --- src/rabbit_reader.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index ff0e9269..42af91a8 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -681,7 +681,8 @@ handle_method0(#'connection.open'{virtual_host = VHostPath}, State#v1{connection_state = running, connection = NewConnection}), rabbit_event:notify(connection_created, - [{type, network}|infos(?CREATION_EVENT_KEYS, State1)]), + [{type, network} | + infos(?CREATION_EVENT_KEYS, State1)]), rabbit_event:if_enabled(StatsTimer, fun() -> internal_emit_stats(State1) end), State1; -- cgit v1.2.1 From 26b7b740905b3c15559c81cc0e945890ae8ec4d4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 14:06:52 +0100 Subject: Make sure Attempts does not go negative. --- src/rabbit_control.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 6fb465b5..1af91f4c 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -304,10 +304,9 @@ action(list_permissions, Node, [], Opts, Inform) -> wait_for_application(Node, Attempts) -> case rpc_call(Node, application, which_applications, [infinity]) of - {badrpc, _} = E -> NewAttempts = Attempts - 1, - case NewAttempts of + {badrpc, _} = E -> case Attempts of 0 -> E; - _ -> wait_for_application0(Node, NewAttempts) + _ -> wait_for_application0(Node, Attempts - 1) end; Apps -> case proplists:is_defined(rabbit, Apps) of %% We've seen the node up; if it goes down -- cgit v1.2.1 From 15c600834bc68051da3b2b16a3457ca7b1a0e758 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 14:12:05 +0100 Subject: Call the field next, not serial. --- include/rabbit.hrl | 2 +- src/rabbit_exchange.erl | 7 +++---- src/rabbit_upgrade_functions.erl | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 99608be4..db4773b8 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -43,7 +43,7 @@ -record(resource, {virtual_host, kind, name}). -record(exchange, {name, type, durable, auto_delete, internal, arguments}). --record(exchange_serial, {name, serial}). +-record(exchange_serial, {name, next}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, arguments, pid}). 
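With the record field renamed in the rabbit.hrl hunk above, a serial table row now has the shape {exchange_serial, XName, Next}. For ad-hoc inspection from a shell one might peek at the counter with a dirty read as sketched below; this helper is not part of the patch, it assumes the patched rabbit.hrl is on the include path, and the virtual host and exchange name arguments are whatever you wish to inspect.

-module(exchange_serial_peek).

-include("rabbit.hrl").

-export([peek/2]).

%% e.g. exchange_serial_peek:peek(<<"/">>, <<"my-topic-exchange">>).
peek(VHostBin, XNameBin) ->
    XName = rabbit_misc:r(VHostBin, exchange, XNameBin),
    case mnesia:dirty_read(rabbit_exchange_serial, XName) of
        [#exchange_serial{next = Next}] -> {ok, Next};
        []                              -> not_found
    end.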
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index dfcb05d6..a9b1b2e5 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -145,7 +145,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> store(X = #exchange{name = Name, type = Type}) -> ok = mnesia:write(rabbit_exchange, X, write), case (type_to_module(Type)):serialise_events() of - true -> S = #exchange_serial{name = Name, serial = 0}, + true -> S = #exchange_serial{name = Name, next = 1}, ok = mnesia:write(rabbit_exchange_serial, S, write); false -> ok end. @@ -327,11 +327,10 @@ serial(#exchange{name = XName, type = Type}) -> end. next_serial(XName) -> - [#exchange_serial{serial = S}] = + [#exchange_serial{next = Serial}] = mnesia:read(rabbit_exchange_serial, XName, write), - Serial = S + 1, ok = mnesia:write(rabbit_exchange_serial, - #exchange_serial{name = XName, serial = Serial}, write), + #exchange_serial{name = XName, next = Serial + 1}, write), Serial. %% Used with atoms from records; e.g., the type is expected to exist. diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 7c53e996..b21dd764 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -105,7 +105,7 @@ topic_trie() -> exchange_event_serial() -> create(rabbit_exchange_serial, [{record_name, exchange_serial}, - {attributes, [name, serial]}]). + {attributes, [name, next]}]). %%-------------------------------------------------------------------- -- cgit v1.2.1 From 9d173dc01bb2f1c796fdb29fc69e429ce1f783bd Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 14:22:42 +0100 Subject: Correct comment. --- src/rabbit_misc.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index ad59edae..93fe2766 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -404,7 +404,7 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> end), false). %% Like execute_mnesia_transaction/2, but TxFun is expected to return a -%% TailFun which gets called immediately before and after the tx commit +%% TailFun which gets called (only) immediately after the tx commit execute_mnesia_tx_with_tail(TxFun) -> case mnesia:is_transaction() of true -> execute_mnesia_transaction(TxFun); -- cgit v1.2.1 From 6019f2da620f17174e677b175ca938f335d8390e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 30 Mar 2011 15:31:45 +0100 Subject: Only recover exchanges and bindings that need to be recovered. --- src/rabbit_binding.erl | 11 +++++++---- src/rabbit_exchange.erl | 9 ++++++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 6167790e..359d4287 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -96,10 +96,13 @@ recover() -> rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - {_, ReverseRoute} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, ReverseRoute, write), - [B | Acc] + case mnesia:read({rabbit_route, B}) of + [] -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, write), + [B | Acc]; + [_] -> Acc + end end, [], rabbit_durable_route). 
exists(Binding) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 9d9b07af..0d13a684 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -84,9 +84,12 @@ recover() -> Xs = rabbit_misc:table_fold( - fun (X, Acc) -> - ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc] + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc + end end, [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), recover_with_bindings( -- cgit v1.2.1 From ba51aa80666fedded2c71ee57fe233906fa795a0 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 31 Mar 2011 12:07:17 +0100 Subject: Don't transform markers when upgrading messages --- src/rabbit_msg_store.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index bb26de64..9b8ddae0 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -2007,7 +2007,10 @@ transform_msg_file(FileOld, FileNew, TransformFun) -> rabbit_msg_file:scan( RefOld, filelib:file_size(FileOld), fun({MsgId, _Size, _Offset, BinMsg}, ok) -> - {ok, MsgNew} = TransformFun(binary_to_term(BinMsg)), + {ok, MsgNew} = case binary_to_term(BinMsg) of + <<>> -> {ok, <<>>}; %% dying client marker + Msg -> TransformFun(Msg) + end, {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew), ok end, ok), -- cgit v1.2.1 From 992e24b44109679d60ec7dc808548b9d57efffd4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 12:43:08 +0100 Subject: Change exchange type API to not distinguish between creating and recovering, and to allow recovering bindings. Recover bindings when needed. --- include/rabbit_exchange_type_spec.hrl | 9 ++++---- src/rabbit_binding.erl | 3 ++- src/rabbit_exchange.erl | 42 ++++++++++++++++++++++++----------- src/rabbit_exchange_type.erl | 13 +++++------ src/rabbit_exchange_type_direct.erl | 9 ++++---- src/rabbit_exchange_type_fanout.erl | 7 +++--- src/rabbit_exchange_type_headers.erl | 7 +++--- src/rabbit_exchange_type_topic.erl | 18 +++++++++------ src/rabbit_tests.erl | 2 +- 9 files changed, 62 insertions(+), 48 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 45c475d8..8163b6f2 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -20,13 +20,12 @@ -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). --spec(recover/2 :: (rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(start/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), - rabbit_types:binding()) -> 'ok'). +-spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), + [rabbit_types:binding()]) -> 'ok'). -spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). 
-spec(assert_args_equivalence/2 :: diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 359d4287..84ae789c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -129,7 +129,8 @@ add(Binding, InnerFun) -> fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), + Src, add_bindings, + [Tx, Src, [B]]), rabbit_event:notify_if( not Tx, binding_created, info(B)) end; diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 0d13a684..f6ab9d74 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -92,18 +92,34 @@ recover() -> end end, [], rabbit_durable_exchange), Bs = rabbit_binding:recover(), - recover_with_bindings( - lists:keysort(#binding.source, Bs), - lists:keysort(#exchange.name, Xs), []). - -recover_with_bindings([B = #binding{source = XName} | Rest], - Xs = [#exchange{name = XName} | _], - Bindings) -> - recover_with_bindings(Rest, Xs, [B | Bindings]); -recover_with_bindings(Bs, [X = #exchange{type = Type} | Xs], Bindings) -> - (type_to_module(Type)):recover(X, Bindings), - recover_with_bindings(Bs, Xs, []); -recover_with_bindings([], [], []) -> + {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), + ok = recovery_callbacks(RecXBs, NoRecXBs). + +%% TODO strip out bindings that are to queues not on this node +filter_recovered_exchanges(Xs, Bs) -> + RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + lists:foldl( + fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> + case dict:find(Src, RecXs) of + {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; + error -> {ok, X} = lookup(Src), + {RecXBs, dict:append(X, B, NoRecXBs)} + end + end, {dict:new(), dict:new()}, Bs). + +recovery_callbacks(RecXBs, NoRecXBs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (X = #exchange{type = Type}, Bs) -> + io:format("Recover X ~p~n", [X]), + (type_to_module(Type)):start(Tx, X, Bs) + end, RecXBs), + dict:map(fun (X = #exchange{type = Type}, Bs) -> + io:format("Recover Bs ~p~n", [Bs]), + (type_to_module(Type)):add_bindings(Tx, X, Bs) + end, NoRecXBs) + end), ok. callback(#exchange{type = XType}, Fun, Args) -> @@ -134,7 +150,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - ok = (type_to_module(Type)):create(Tx, Exchange), + ok = (type_to_module(Type)):start(Tx, Exchange, []), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 547583e9..ad08eb86 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -26,17 +26,14 @@ behaviour_info(callbacks) -> %% called BEFORE declaration, to check args etc; may exit with #amqp_error{} {validate, 1}, - %% called after declaration when previously absent - {create, 2}, + %% called after declaration and recovery + {start, 3}, - %% called when recovering - {recover, 2}, - - %% called after exchange deletion. + %% called after exchange (auto)deletion. {delete, 3}, - %% called after a binding has been added - {add_binding, 3}, + %% called after a binding has been added or bindings have been recovered + {add_bindings, 3}, %% called after bindings have been deleted. 
{remove_bindings, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 349c2f6e..1658c9f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -20,8 +20,8 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, - add_binding/3, remove_bindings/3, assert_args_equivalence/2]). +-export([validate/1, start/3, delete/3, + add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). -rabbit_boot_step({?MODULE, @@ -40,10 +40,9 @@ route(#exchange{name = Name}, rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index bc5293c8..83afdd71 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -39,10 +39,9 @@ route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index d3529b06..0fe8404f 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -113,10 +113,9 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). validate(_X) -> ok. -create(_Tx, _X) -> ok. -recover(_X, _Bs) -> ok. +start(_Tx, _X, _Bs) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_binding(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..52f468ee 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, recover/2, delete/3, add_binding/3, +-export([validate/1, start/3, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -47,13 +47,14 @@ route(#exchange{name = X}, end || RKey <- Routes]). validate(_X) -> ok. -create(_Tx, _X) -> ok. 
-recover(_Exchange, Bs) -> +start(true, _X, Bs) -> rabbit_misc:execute_mnesia_transaction( fun () -> lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end). + end); +start(false, _X, _Bs) -> + ok. delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), @@ -62,9 +63,12 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _Exchange, Binding) -> - internal_add_binding(Binding); -add_binding(false, _Exchange, _Binding) -> +add_bindings(true, _X, Bs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> + lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) + end); +add_bindings(false, _X, _Bs) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index fb1c9a34..075258e5 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -629,7 +629,7 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]], - lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, + lists:foreach(fun (B) -> exchange_op_callback(X, add_bindings, [[B]]) end, Bindings), %% test some matches -- cgit v1.2.1 From 5c36ec391adbf2d949cfbce79efab20eb03b7116 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 13:08:50 +0100 Subject: Only recover bindings that are to exchanges or to queues that are on this node. --- src/rabbit_binding.erl | 29 +++++++++++++++++++++++------ src/rabbit_exchange.erl | 3 --- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 84ae789c..c9cf0a39 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -96,15 +96,32 @@ recover() -> rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case mnesia:read({rabbit_route, B}) of - [] -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, write), - [B | Acc]; - [_] -> Acc + case should_recover(B) of + true -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, write), + [B | Acc]; + false -> Acc end end, [], rabbit_durable_route). +should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}) -> + case mnesia:read({rabbit_route, B}) of + [] -> case Kind of + exchange -> true; + queue -> case mnesia:read({rabbit_durable_queue, Dest}) of + [Q] -> #amqqueue{pid = Pid} = Q, + Node = node(), + case node(Pid) of + Node -> true; + _ -> false + end; + _ -> false + end + end; + _ -> false + end. + exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index f6ab9d74..572a0b70 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -95,7 +95,6 @@ recover() -> {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), ok = recovery_callbacks(RecXBs, NoRecXBs). 
-%% TODO strip out bindings that are to queues not on this node filter_recovered_exchanges(Xs, Bs) -> RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), lists:foldl( @@ -112,11 +111,9 @@ recovery_callbacks(RecXBs, NoRecXBs) -> fun () -> ok end, fun (ok, Tx) -> dict:map(fun (X = #exchange{type = Type}, Bs) -> - io:format("Recover X ~p~n", [X]), (type_to_module(Type)):start(Tx, X, Bs) end, RecXBs), dict:map(fun (X = #exchange{type = Type}, Bs) -> - io:format("Recover Bs ~p~n", [Bs]), (type_to_module(Type)):add_bindings(Tx, X, Bs) end, NoRecXBs) end), -- cgit v1.2.1 From 6fe41f3e724cd65792916b24e50748d0bdc0e4be Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 13:25:18 +0100 Subject: Oops. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 075258e5..9b122a02 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -595,7 +595,7 @@ test_topic_matching() -> auto_delete = false, arguments = []}, %% create rabbit_exchange_type_topic:validate(X), - exchange_op_callback(X, create, []), + exchange_op_callback(X, start, [[]]), %% add some bindings Bindings = [#binding{source = XName, -- cgit v1.2.1 From ee0deb4dd37985bdfef24cda035121ba5e02f82b Mon Sep 17 00:00:00 2001 From: Vlad Alexandru Ionescu Date: Thu, 31 Mar 2011 14:59:46 +0100 Subject: reverting previous changes; treating {shutdown, _} exit reasons the same as normal in supervisor2 --- src/supervisor2.erl | 150 ++++++++++++++++++++++++---------------------------- 1 file changed, 70 insertions(+), 80 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 2c0874ab..73316db9 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,9 +38,8 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% -%% 5) Added an exception to reporting: If a child has MaxR = 0 and it -%% terminates with reason {shutdown, _}, then supervisor2 behaves -%% as supervisor *except* it does not report anything to error_logger. +%% 5) normal, shutdown and {shutdown, _} exit reasons are all treated the same +%% (i.e. are regarded as normal exits) %% %% All modifications are (C) 2010-2011 VMware, Inc. %% @@ -116,10 +115,10 @@ behaviour_info(_Other) -> %%% --------------------------------------------------- start_link(Mod, Args) -> gen_server:start_link(?MODULE, {self, Mod, Args}, []). - + start_link(SupName, Mod, Args) -> gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []). - + %%% --------------------------------------------------- %%% Interface functions. %%% --------------------------------------------------- @@ -162,9 +161,9 @@ delayed_restart(Supervisor, RestartDetails) -> gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). %%% --------------------------------------------------- -%%% +%%% %%% Initialize the supervisor. -%%% +%%% %%% --------------------------------------------------- init({SupName, Mod, Args}) -> process_flag(trap_exit, true), @@ -183,7 +182,7 @@ init({SupName, Mod, Args}) -> Error -> {stop, {bad_return, {Mod, init, Error}}} end. - + init_children(State, StartSpec) -> SupName = State#state.name, case check_startspec(StartSpec) of @@ -213,7 +212,7 @@ init_dynamic(_State, StartSpec) -> %% Func: start_children/2 %% Args: Children = [#child] in start order %% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} -%% Purpose: Start all children. 
The new list contains #child's +%% Purpose: Start all children. The new list contains #child's %% with pids. %% Returns: {ok, NChildren} | {error, NChildren} %% NChildren = [#child] in termination order (reversed @@ -245,7 +244,7 @@ do_start_child(SupName, Child) -> NChild = Child#child{pid = Pid}, report_progress(NChild, SupName), {ok, Pid, Extra}; - ignore -> + ignore -> {ok, undefined}; {error, What} -> {error, What}; What -> {error, What} @@ -264,23 +263,23 @@ do_start_child_i(M, F, A) -> What -> {error, What} end. - + %%% --------------------------------------------------- -%%% +%%% %%% Callback functions. -%%% +%%% %%% --------------------------------------------------- handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) -> #child{mfa = {M, F, A}} = hd(State#state.children), Args = A ++ EArgs, case do_start_child_i(M, F, Args) of {ok, Pid} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid}, NState}; {ok, Pid, Extra} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid, Extra}, NState}; What -> @@ -373,7 +372,7 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> %%% Hopefully cause a function-clause as there is no API function %%% that utilizes cast. handle_cast(null, State) -> - error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", []), {noreply, State}. @@ -390,7 +389,7 @@ handle_info({'EXIT', Pid, Reason}, State) -> end; handle_info(Msg, State) -> - error_logger:error_msg("Supervisor received unexpected message: ~p~n", + error_logger:error_msg("Supervisor received unexpected message: ~p~n", [Msg]), {noreply, State}. %% @@ -440,13 +439,13 @@ check_flags({Strategy, MaxIntensity, Period}) -> check_flags(What) -> {bad_flags, What}. -update_childspec(State, StartSpec) when ?is_simple(State) -> - case check_startspec(StartSpec) of - {ok, [Child]} -> - {ok, State#state{children = [Child]}}; - Error -> - {error, Error} - end; +update_childspec(State, StartSpec) when ?is_simple(State) -> + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; update_childspec(State, StartSpec) -> case check_startspec(StartSpec) of @@ -467,7 +466,7 @@ update_childspec1([Child|OldC], Children, KeepOld) -> end; update_childspec1([], Children, KeepOld) -> % Return them in (keeped) reverse start order. - lists:reverse(Children ++ KeepOld). + lists:reverse(Children ++ KeepOld). update_chsp(OldCh, Children) -> case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name -> @@ -481,7 +480,7 @@ update_chsp(OldCh, Children) -> NewC -> {ok, NewC} end. - + %%% --------------------------------------------------- %%% Start a new child. 
%%% --------------------------------------------------- @@ -493,12 +492,12 @@ handle_start_child(Child, State) -> {ok, Pid} -> Children = State#state.children, {{ok, Pid}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {ok, Pid, Extra} -> Children = State#state.children, {{ok, Pid, Extra}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {error, What} -> {{error, {What, Child}}, State} @@ -546,37 +545,28 @@ do_restart({RestartType, Delay}, Reason, Child, State) -> {ok, state_del_child(Child, NState)} end; do_restart(permanent, Reason, Child, State) -> - maybe_report_and_restart(Reason, Child, State); -do_restart(intrinsic, normal, Child, State) -> - {shutdown, state_del_child(Child, State)}; -do_restart(intrinsic, shutdown, Child = #child{child_type = supervisor}, - State) -> - {shutdown, state_del_child(Child, State)}; -do_restart(_, normal, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}; -do_restart(_, shutdown, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}; + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); +do_restart(Type, normal, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); +do_restart(Type, shutdown, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); +do_restart(Type, {shutdown, _}, Child, State) -> + normal_or_shutdown_restart(Type, Child, State); do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> - maybe_report_and_restart(Reason, Child, State); + report_error(child_terminated, Reason, Child, State#state.name), + restart(Child, State); do_restart(temporary, Reason, Child, State) -> - maybe_report(Reason, Child, State), + report_error(child_terminated, Reason, Child, State#state.name), NState = state_del_child(Child, State), {ok, NState}. -maybe_report_and_restart({shutdown, _}, Child, State = #state{intensity = 0}) -> - {terminate, NState} = add_restart(State), - {shutdown, state_del_child(Child, NState)}; -maybe_report_and_restart(Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name), - restart(Child, State). - -maybe_report({shutdown, _}, _Child, #state{intensity = 0}) -> - ok; -maybe_report(Reason, Child, State) -> - report_error(child_terminated, Reason, Child, State#state.name). +normal_or_shutdown_restart(intrinsic, Child, State) -> + {shutdown, state_del_child(Child, State)}; +normal_or_shutdown_restart(_, Child, State) -> + NState = state_del_child(Child, State), + {ok, NState}. restart(Child, State) -> case add_restart(State) of @@ -691,17 +681,17 @@ do_terminate(Child, _SupName) -> Child. %%----------------------------------------------------------------- -%% Shutdowns a child. We must check the EXIT value +%% Shutdowns a child. We must check the EXIT value %% of the child, because it might have died with another reason than -%% the wanted. In that case we want to report the error. We put a -%% monitor on the child an check for the 'DOWN' message instead of -%% checking for the 'EXIT' message, because if we check the 'EXIT' -%% message a "naughty" child, who does unlink(Sup), could hang the -%% supervisor. +%% the wanted. In that case we want to report the error. 
We put a +%% monitor on the child an check for the 'DOWN' message instead of +%% checking for the 'EXIT' message, because if we check the 'EXIT' +%% message a "naughty" child, who does unlink(Sup), could hang the +%% supervisor. %% Returns: ok | {error, OtherReason} (this should be reported) %%----------------------------------------------------------------- shutdown(Pid, brutal_kill) -> - + case monitor_child(Pid) of ok -> exit(Pid, kill), @@ -711,16 +701,16 @@ shutdown(Pid, brutal_kill) -> {'DOWN', _MRef, process, Pid, OtherReason} -> {error, OtherReason} end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end; shutdown(Pid, Time) -> - + case monitor_child(Pid) of ok -> exit(Pid, shutdown), %% Try to shutdown gracefully - receive + receive {'DOWN', _MRef, process, Pid, shutdown} -> ok; {'DOWN', _MRef, process, Pid, OtherReason} -> @@ -732,14 +722,14 @@ shutdown(Pid, Time) -> {error, OtherReason} end end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end. %% Help function to shutdown/2 switches from link to monitor approach monitor_child(Pid) -> - - %% Do the monitor operation first so that if the child dies + + %% Do the monitor operation first so that if the child dies %% before the monitoring is done causing a 'DOWN'-message with %% reason noproc, we will get the real reason in the 'EXIT'-message %% unless a naughty child has already done unlink... @@ -749,22 +739,22 @@ monitor_child(Pid) -> receive %% If the child dies before the unlik we must empty %% the mail-box of the 'EXIT'-message and the 'DOWN'-message. - {'EXIT', Pid, Reason} -> - receive + {'EXIT', Pid, Reason} -> + receive {'DOWN', _, process, Pid, _} -> {error, Reason} end - after 0 -> + after 0 -> %% If a naughty child did unlink and the child dies before - %% monitor the result will be that shutdown/2 receives a + %% monitor the result will be that shutdown/2 receives a %% 'DOWN'-message with reason noproc. %% If the child should die after the unlink there %% will be a 'DOWN'-message with a correct reason - %% that will be handled in shutdown/2. - ok + %% that will be handled in shutdown/2. + ok end. - - + + %%----------------------------------------------------------------- %% Child/State manipulating functions. %%----------------------------------------------------------------- @@ -818,7 +808,7 @@ remove_child(Child, State) -> %% Args: SupName = {local, atom()} | {global, atom()} | self %% Type = {Strategy, MaxIntensity, Period} %% Strategy = one_for_one | one_for_all | simple_one_for_one | -%% rest_for_one +%% rest_for_one %% MaxIntensity = integer() %% Period = integer() %% Mod :== atom() @@ -912,10 +902,10 @@ validChildType(supervisor) -> true; validChildType(worker) -> true; validChildType(What) -> throw({invalid_child_type, What}). -validName(_Name) -> true. +validName(_Name) -> true. -validFunc({M, F, A}) when is_atom(M), - is_atom(F), +validFunc({M, F, A}) when is_atom(M), + is_atom(F), is_list(A) -> true; validFunc(Func) -> throw({invalid_mfa, Func}). @@ -932,7 +922,7 @@ validDelay(Delay) when is_number(Delay), Delay >= 0 -> true; validDelay(What) -> throw({invalid_delay, What}). -validShutdown(Shutdown, _) +validShutdown(Shutdown, _) when is_integer(Shutdown), Shutdown > 0 -> true; validShutdown(infinity, supervisor) -> true; validShutdown(brutal_kill, _) -> true; @@ -958,7 +948,7 @@ validMods(Mods) -> throw({invalid_modules, Mods}). 
%%% Returns: {ok, State'} | {terminate, State'} %%% ------------------------------------------------------ -add_restart(State) -> +add_restart(State) -> I = State#state.intensity, P = State#state.period, R = State#state.restarts, -- cgit v1.2.1 From 54753b4b0b5a803ebe4777bc7e771b8e43d6fa1f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:04:41 +0100 Subject: Unify recovery into one boot step, based binding recovery on the queues that have been recovered. --- src/rabbit.erl | 47 +++++++++++++++++++++++++++++++++++++---------- src/rabbit_amqqueue.erl | 7 +++---- src/rabbit_binding.erl | 22 ++++++++-------------- src/rabbit_exchange.erl | 41 +++++++---------------------------------- 4 files changed, 55 insertions(+), 62 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 807e9e7d..86c53ff6 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -27,7 +27,7 @@ %%--------------------------------------------------------------------------- %% Boot steps. --export([maybe_insert_default_data/0, boot_delegate/0]). +-export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). -rabbit_boot_step({codec_correctness_check, [{description, "codec correctness check"}, @@ -123,15 +123,9 @@ {requires, core_initialized}, {enables, routing_ready}]}). --rabbit_boot_step({exchange_recovery, - [{description, "exchange recovery"}, - {mfa, {rabbit_exchange, recover, []}}, - {requires, empty_db_check}, - {enables, routing_ready}]}). - --rabbit_boot_step({queue_sup_queue_recovery, - [{description, "queue supervisor and queue recovery"}, - {mfa, {rabbit_amqqueue, start, []}}, +-rabbit_boot_step({recovery, + [{description, "exchange / queue recovery"}, + {mfa, {rabbit, recover, []}}, {requires, empty_db_check}, {enables, routing_ready}]}). @@ -186,6 +180,7 @@ -spec(maybe_insert_default_data/0 :: () -> 'ok'). -spec(boot_delegate/0 :: () -> 'ok'). +-spec(recover/0 :: () -> 'ok'). -endif. @@ -464,6 +459,38 @@ boot_delegate() -> {ok, Count} = application:get_env(rabbit, delegate_count), rabbit_sup:start_child(delegate_sup, [Count]). +recover() -> + Xs = rabbit_exchange:recover(), + Qs = rabbit_amqqueue:start(), + Bs = rabbit_binding:recover(Qs), + {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), + ok = recovery_callbacks(RecXBs, NoRecSrcBs). + +filter_recovered_exchanges(Xs, Bs) -> + RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + lists:foldl( + fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> + case dict:find(Src, RecXs) of + {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; + error -> {RecXBs, dict:append(Src, B, NoRecXBs)} + end + end, {dict:new(), dict:new()}, Bs). + +recovery_callbacks(RecXBs, NoRecXBs) -> + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (X, Bs) -> + rabbit_exchange:callback(X, start, [Tx, X, Bs]) + end, RecXBs), + dict:map(fun (Src, Bs) -> + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bs]) + end, NoRecXBs) + end), + ok. + maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of true -> insert_default_data(); diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index c7391965..2618c1f5 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -57,7 +57,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> 'ok'). +-spec(start/0 :: () -> [rabbit_types:amqqueue()]). -spec(stop/0 :: () -> 'ok'). 
-spec(declare/5 :: (name(), boolean(), boolean(), @@ -166,8 +166,7 @@ start() -> {rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []}, transient, infinity, supervisor, [rabbit_amqqueue_sup]}), - _RealDurableQueues = recover_durable_queues(DurableQueues), - ok. + recover_durable_queues(DurableQueues). stop() -> ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup), @@ -188,7 +187,7 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], [Q || Q <- Qs, - gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == Q]. + gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c9cf0a39..e656cfc7 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/0, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/1, exists/1, add/1, remove/1, add/2, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -50,7 +50,7 @@ -opaque(deletions() :: dict()). --spec(recover/0 :: () -> [rabbit_types:binding()]). +-spec(recover/1 :: ([rabbit_types:amqqueue()]) -> [rabbit_types:binding()]). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -93,10 +93,11 @@ destination_name, destination_kind, routing_key, arguments]). -recover() -> +recover(Qs) -> + QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B) of + case should_recover(B, QNames) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), @@ -105,19 +106,12 @@ recover() -> end end, [], rabbit_durable_route). -should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}) -> +should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, + QNames) -> case mnesia:read({rabbit_route, B}) of [] -> case Kind of exchange -> true; - queue -> case mnesia:read({rabbit_durable_queue, Dest}) of - [Q] -> #amqqueue{pid = Pid} = Q, - Node = node(), - case node(Pid) of - Node -> true; - _ -> false - end; - _ -> false - end + queue -> sets:is_element(Dest, QNames) end; _ -> false end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 572a0b70..fa837d0c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,41 +83,14 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; - [_] -> Acc - end - end, [], rabbit_durable_exchange), - Bs = rabbit_binding:recover(), - {RecXBs, NoRecXBs} = filter_recovered_exchanges(Xs, Bs), - ok = recovery_callbacks(RecXBs, NoRecXBs). 
- -filter_recovered_exchanges(Xs, Bs) -> - RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), - lists:foldl( - fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case dict:find(Src, RecXs) of - {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; - error -> {ok, X} = lookup(Src), - {RecXBs, dict:append(X, B, NoRecXBs)} + rabbit_misc:table_fold( + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc end - end, {dict:new(), dict:new()}, Bs). - -recovery_callbacks(RecXBs, NoRecXBs) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> - dict:map(fun (X = #exchange{type = Type}, Bs) -> - (type_to_module(Type)):start(Tx, X, Bs) - end, RecXBs), - dict:map(fun (X = #exchange{type = Type}, Bs) -> - (type_to_module(Type)):add_bindings(Tx, X, Bs) - end, NoRecXBs) - end), - ok. + end, [], rabbit_durable_exchange). callback(#exchange{type = XType}, Fun, Args) -> apply(type_to_module(XType), Fun, Args). -- cgit v1.2.1 From 338aad71454799c932b875b9ce7e57bcedf44793 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:18:42 +0100 Subject: Fix tests. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 9b122a02..89d0d162 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - ok = rabbit_amqqueue:start(), + rabbit_amqqueue:start(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From 3833c25b23c209f3c5a77d14ec459b15c82b7f55 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 16:25:43 +0100 Subject: Recover e2e properly. --- src/rabbit.erl | 2 +- src/rabbit_binding.erl | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 86c53ff6..6b6731a3 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -462,7 +462,7 @@ boot_delegate() -> recover() -> Xs = rabbit_exchange:recover(), Qs = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(Qs), + Bs = rabbit_binding:recover(Xs, Qs), {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), ok = recovery_callbacks(RecXBs, NoRecSrcBs). diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index e656cfc7..fff9016c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/1, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/2, exists/1, add/1, remove/1, add/2, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -50,7 +50,8 @@ -opaque(deletions() :: dict()). --spec(recover/1 :: ([rabbit_types:amqqueue()]) -> [rabbit_types:binding()]). +-spec(recover/2 :: ([rabbit_types:exchange()], [rabbit_types:amqqueue()]) -> + [rabbit_types:binding()]). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -93,11 +94,12 @@ destination_name, destination_kind, routing_key, arguments]). 
-recover(Qs) -> +recover(Xs, Qs) -> + XNames = sets:from_list([Name || #exchange{name = Name} <- Xs]), QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, QNames) of + case should_recover(B, XNames, QNames) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), @@ -107,12 +109,12 @@ recover(Qs) -> end, [], rabbit_durable_route). should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, - QNames) -> + XNames, QNames) -> case mnesia:read({rabbit_route, B}) of - [] -> case Kind of - exchange -> true; - queue -> sets:is_element(Dest, QNames) - end; + [] -> sets:is_element(Dest, case Kind of + exchange -> XNames; + queue -> QNames + end); _ -> false end. -- cgit v1.2.1 From 8a749d93ff409f665ff610ca62482705f672db13 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 31 Mar 2011 17:38:29 +0100 Subject: Slimmer gatherer termination --- src/rabbit_misc.erl | 10 +--------- src/rabbit_msg_store.erl | 2 +- src/rabbit_queue_index.erl | 6 +++--- src/test_sup.erl | 2 +- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 2e9563cf..1daeeb2a 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -48,8 +48,7 @@ -export([sort_field_table/1]). -export([pid_to_string/1, string_to_pid/1]). -export([version_compare/2, version_compare/3]). --export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3, - unlink_and_capture_exit/1]). +-export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). -export([now_ms/0]). @@ -178,7 +177,6 @@ -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})). -spec(dict_cons/3 :: (any(), any(), dict()) -> dict()). -spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()). --spec(unlink_and_capture_exit/1 :: (pid()) -> 'ok'). -spec(get_options/2 :: ([optdef()], [string()]) -> {[string()], [{string(), any()}]}). -spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]). @@ -749,12 +747,6 @@ dict_cons(Key, Value, Dict) -> orddict_cons(Key, Value, Dict) -> orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict). -unlink_and_capture_exit(Pid) -> - unlink(Pid), - receive {'EXIT', Pid, _} -> ok - after 0 -> ok - end. - %% Separate flags and options from arguments. 
%% get_options([{flag, "-q"}, {option, "-p", "/"}], %% ["set_permissions","-p","/","guest", diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 34c793ec..65688142 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1511,8 +1511,8 @@ build_index(Gatherer, Left, [], sum_file_size = SumFileSize }) -> case gatherer:out(Gatherer) of empty -> + unlink(Gatherer), ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer), ok = index_delete_by_file(undefined, State), Offset = case ets:lookup(FileSummaryEts, Left) of [] -> 0; diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index 367953b8..aaf3df78 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -514,8 +514,8 @@ queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) -> queue_index_walker({next, Gatherer}) when is_pid(Gatherer) -> case gatherer:out(Gatherer) of empty -> + unlink(Gatherer), ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer), finished; {value, {MsgId, Count}} -> {MsgId, Count, {next, Gatherer}} @@ -1036,8 +1036,8 @@ foreach_queue_index(Funs) -> end) end || QueueDirName <- QueueDirNames], empty = gatherer:out(Gatherer), - ok = gatherer:stop(Gatherer), - ok = rabbit_misc:unlink_and_capture_exit(Gatherer). + unlink(Gatherer), + ok = gatherer:stop(Gatherer). transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) -> ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun), diff --git a/src/test_sup.erl b/src/test_sup.erl index b4df1fd0..5fc0eac0 100644 --- a/src/test_sup.erl +++ b/src/test_sup.erl @@ -46,7 +46,7 @@ with_sup(RestartStrategy, Fun) -> {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), Res = Fun(SupPid), exit(SupPid, shutdown), - rabbit_misc:unlink_and_capture_exit(SupPid), + unlink(SupPid), Res. init([RestartStrategy]) -> -- cgit v1.2.1 From 9f5fcfc3b5ac739ac084cafea30f41715a669e85 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 31 Mar 2011 17:54:19 +0100 Subject: Introduce rabbit_semi_durable_route to kee bindings between transient exchanges and durable queues alive if the durable queue is down. --- src/rabbit_binding.erl | 28 +++++++++++++++++++--------- src/rabbit_mnesia.erl | 5 +++++ src/rabbit_upgrade_functions.erl | 6 ++++++ 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index fff9016c..8a1c3c35 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,6 +97,10 @@ recover(Xs, Qs) -> XNames = sets:from_list([Name || #exchange{name = Name} <- Xs]), QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), + rabbit_misc:table_fold( + fun (Route, ok) -> + ok = mnesia:write(rabbit_semi_durable_route, Route, write) + end, ok, rabbit_durable_route), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> case should_recover(B, XNames, QNames) of @@ -106,7 +110,7 @@ recover(Xs, Qs) -> [B | Acc]; false -> Acc end - end, [], rabbit_durable_route). + end, [], rabbit_semi_durable_route). 
should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, XNames, QNames) -> @@ -138,7 +142,7 @@ add(Binding, InnerFun) -> case InnerFun(Src, Dst) of ok -> case mnesia:read({rabbit_route, B}) of - [] -> ok = sync_binding(B, all_durable([Src, Dst]), + [] -> ok = sync_binding(B, Src, Dst, fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback( @@ -166,7 +170,7 @@ remove(Binding, InnerFun) -> [_] -> case InnerFun(Src, Dst) of ok -> - ok = sync_binding(B, all_durable([Src, Dst]), + ok = sync_binding(B, Src, Dst, fun mnesia:delete_object/3), {ok, maybe_auto_delete(B#binding.source, [B], new_deletions())}; @@ -239,7 +243,8 @@ has_for_source(SrcName) -> %% we need to check for durable routes here too in case a bunch of %% routes to durable queues have been removed temporarily as a %% result of a node failure - contains(rabbit_route, Match) orelse contains(rabbit_durable_route, Match). + contains(rabbit_route, Match) orelse contains(rabbit_semi_durable_route, + Match). remove_for_source(SrcName) -> [begin @@ -276,13 +281,17 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -sync_binding(Binding, Durable, Fun) -> - ok = case Durable of - true -> Fun(rabbit_durable_route, - #route{binding = Binding}, write); +sync_binding(Binding, Src, Dest, Fun) -> + {Route, ReverseRoute} = route_with_reverse(Binding), + ok = case all_durable([Src, Dest]) of + true -> Fun(rabbit_durable_route, Route, write); false -> ok end, - {Route, ReverseRoute} = route_with_reverse(Binding), + ok = case Dest of + #amqqueue{durable = true} -> Fun(rabbit_semi_durable_route, Route, + write); + _ -> ok + end, ok = Fun(rabbit_route, Route, write), ok = Fun(rabbit_reverse_route, ReverseRoute, write), ok. @@ -363,6 +372,7 @@ maybe_auto_delete(XName, Bindings, Deletions) -> delete_forward_routes(Route) -> ok = mnesia:delete_object(rabbit_route, Route, write), + ok = mnesia:delete_object(rabbit_semi_durable_route, Route, write), ok = mnesia:delete_object(rabbit_durable_route, Route, write). delete_transient_forward_routes(Route) -> diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index fbcf07ae..77b06d0c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -187,6 +187,11 @@ table_definitions() -> {attributes, record_info(fields, route)}, {disc_copies, [node()]}, {match, #route{binding = binding_match(), _='_'}}]}, + {rabbit_semi_durable_route, + [{record_name, route}, + {attributes, record_info(fields, route)}, + {type, ordered_set}, + {match, #route{binding = binding_match(), _='_'}}]}, {rabbit_route, [{record_name, route}, {attributes, record_info(fields, route)}, diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 7567c29e..842c3b4f 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -26,6 +26,7 @@ -rabbit_upgrade({internal_exchanges, mnesia, []}). -rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}). -rabbit_upgrade({topic_trie, mnesia, []}). +-rabbit_upgrade({semi_durable_route, mnesia, []}). %% ------------------------------------------------------------------- @@ -37,6 +38,7 @@ -spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). -spec(topic_trie/0 :: () -> 'ok'). +-spec(semi_durable_route/0 :: () -> 'ok'). -endif. @@ -101,6 +103,10 @@ topic_trie() -> {attributes, [trie_binding, value]}, {type, ordered_set}]). 
+semi_durable_route() -> + create(rabbit_semi_durable_route, [{record_name, route}, + {attributes, [binding, value]}]). + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From ff78d574ece961bcafc3efe1fbd235893d8ea28a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 1 Apr 2011 04:06:07 +0100 Subject: fix some R12B-5isms --- src/gm.erl | 8 ++++---- src/rabbit_exchange_type_topic.erl | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 1edcde11..aa5ba146 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1011,7 +1011,7 @@ prune_or_create_group(Self, GroupName) -> fun () -> GroupNew = #gm_group { name = GroupName, members = [Self], version = 0 }, - case mnesia:read(?GROUP_TABLE, GroupName) of + case mnesia:read(?GROUP_TABLE, GroupName, read) of [] -> mnesia:write(GroupNew), GroupNew; @@ -1029,7 +1029,7 @@ record_dead_member_in_group(Member, GroupName) -> {atomic, Group} = mnesia:sync_transaction( fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), case lists:splitwith( fun (Member1) -> Member1 =/= Member end, Members) of {_Members1, []} -> %% not found - already recorded dead @@ -1049,7 +1049,7 @@ record_new_member_in_group(GroupName, Left, NewMember, Fun) -> mnesia:sync_transaction( fun () -> [#gm_group { members = Members, version = Ver } = Group1] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), {Prefix, [Left | Suffix]} = lists:splitwith(fun (M) -> M =/= Left end, Members), Members1 = Prefix ++ [Left, NewMember | Suffix], @@ -1068,7 +1068,7 @@ erase_members_in_group(Members, GroupName) -> fun () -> [Group1 = #gm_group { members = [_|_] = Members1, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName), + mnesia:read(?GROUP_TABLE, GroupName, read), case Members1 -- DeadMembers of Members1 -> Group1; Members2 -> Group2 = diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index ffd1e583..a61e380b 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -191,7 +191,7 @@ trie_child(X, Node, Word) -> case mnesia:read(rabbit_topic_trie_edge, #trie_edge{exchange_name = X, node_id = Node, - word = Word}) of + word = Word}, read) of [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; [] -> error end. -- cgit v1.2.1 From 3da0764b6482d711bff0faca201fc5851543ad81 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 11:03:52 +0100 Subject: Use names, not exchanges / queues. --- src/rabbit.erl | 34 +++++++++++++++++----------------- src/rabbit_amqqueue.erl | 2 +- src/rabbit_binding.erl | 8 ++++---- src/rabbit_exchange.erl | 2 +- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 6b6731a3..fe392c5f 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -460,34 +460,34 @@ boot_delegate() -> rabbit_sup:start_child(delegate_sup, [Count]). recover() -> - Xs = rabbit_exchange:recover(), - Qs = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(Xs, Qs), - {RecXBs, NoRecSrcBs} = filter_recovered_exchanges(Xs, Bs), - ok = recovery_callbacks(RecXBs, NoRecSrcBs). + XNames = rabbit_exchange:recover(), + QNames = rabbit_amqqueue:start(), + Bs = rabbit_binding:recover(XNames, QNames), + {RecXBs, NoRecXBs} = filter_recovered_exchanges(XNames, Bs), + ok = recovery_callbacks(RecXBs, NoRecXBs). 
filter_recovered_exchanges(Xs, Bs) -> - RecXs = dict:from_list([{XName, X} || X = #exchange{name = XName} <- Xs]), + RecXs = sets:from_list(Xs), lists:foldl( fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case dict:find(Src, RecXs) of - {ok, X} -> {dict:append(X, B, RecXBs), NoRecXBs}; - error -> {RecXBs, dict:append(Src, B, NoRecXBs)} + case sets:is_element(Src, RecXs) of + true -> {dict:append(Src, B, RecXBs), NoRecXBs}; + false -> {RecXBs, dict:append(Src, B, NoRecXBs)} end end, {dict:new(), dict:new()}, Bs). recovery_callbacks(RecXBs, NoRecXBs) -> + CB = fun (Tx, F, XBs) -> + dict:map(fun (XName, Bs) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, F, [Tx, X, Bs]) + end, XBs) + end, rabbit_misc:execute_mnesia_transaction( fun () -> ok end, fun (ok, Tx) -> - dict:map(fun (X, Bs) -> - rabbit_exchange:callback(X, start, [Tx, X, Bs]) - end, RecXBs), - dict:map(fun (Src, Bs) -> - {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bs]) - end, NoRecXBs) + CB(Tx, start, RecXBs), + CB(Tx, add_bindings, NoRecXBs) end), ok. diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 2618c1f5..6267b823 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -186,7 +186,7 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], - [Q || Q <- Qs, + [Q#amqqueue.name || Q <- Qs, gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index fff9016c..47793920 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -94,12 +94,12 @@ destination_name, destination_kind, routing_key, arguments]). -recover(Xs, Qs) -> - XNames = sets:from_list([Name || #exchange{name = Name} <- Xs]), - QNames = sets:from_list([Name || #amqqueue{name = Name} <- Qs]), +recover(XsL, QsL) -> + Xs = sets:from_list(XsL), + Qs = sets:from_list(QsL), rabbit_misc:table_fold( fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, XNames, QNames) of + case should_recover(B, Xs, Qs) of true -> {_, Rev} = route_with_reverse(Route), ok = mnesia:write(rabbit_route, Route, write), ok = mnesia:write(rabbit_reverse_route, Rev, write), diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index fa837d0c..e05a8812 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,7 +87,7 @@ recover() -> fun (X = #exchange{name = XName}, Acc) -> case mnesia:read({rabbit_exchange, XName}) of [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; + [XName | Acc]; [_] -> Acc end end, [], rabbit_durable_exchange). -- cgit v1.2.1 From 89843846fdc48408d78138ad398771d99e3151d8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 11:07:51 +0100 Subject: Dest -> Dst --- src/rabbit_binding.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 0263144a..26eb78fb 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -112,13 +112,13 @@ recover(XsL, QsL) -> end end, [], rabbit_semi_durable_route). 
-should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, +should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, XNames, QNames) -> case mnesia:read({rabbit_route, B}) of - [] -> sets:is_element(Dest, case Kind of - exchange -> XNames; - queue -> QNames - end); + [] -> sets:is_element(Dst, case Kind of + exchange -> XNames; + queue -> QNames + end); _ -> false end. @@ -281,13 +281,13 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -sync_binding(Binding, Src, Dest, Fun) -> +sync_binding(Binding, Src, Dst, Fun) -> {Route, ReverseRoute} = route_with_reverse(Binding), - ok = case all_durable([Src, Dest]) of + ok = case all_durable([Src, Dst]) of true -> Fun(rabbit_durable_route, Route, write); false -> ok end, - ok = case Dest of + ok = case Dst of #amqqueue{durable = true} -> Fun(rabbit_semi_durable_route, Route, write); _ -> ok -- cgit v1.2.1 From ff083aa2ddb84a35c106465d27035f5df78c3166 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 11:31:34 +0100 Subject: Make things more symmetrical. This adds extra rows into the semi durable table for bindings from transient to durable exchanges, but since we recover bindings based on exchanges and queues recovered this should not have a visible change. --- src/rabbit_binding.erl | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 26eb78fb..64178f37 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -266,10 +266,8 @@ remove_transient_for_destination(DstName) -> %%---------------------------------------------------------------------------- -all_durable(Resources) -> - lists:all(fun (#exchange{durable = D}) -> D; - (#amqqueue{durable = D}) -> D - end, Resources). +durable(#exchange{durable = D}) -> D; +durable(#amqqueue{durable = D}) -> D. binding_action(Binding = #binding{source = SrcName, destination = DstName, @@ -283,14 +281,13 @@ binding_action(Binding = #binding{source = SrcName, sync_binding(Binding, Src, Dst, Fun) -> {Route, ReverseRoute} = route_with_reverse(Binding), - ok = case all_durable([Src, Dst]) of + ok = case durable(Src) andalso durable(Dst) of true -> Fun(rabbit_durable_route, Route, write); false -> ok end, - ok = case Dst of - #amqqueue{durable = true} -> Fun(rabbit_semi_durable_route, Route, - write); - _ -> ok + ok = case durable(Dst) of + true -> Fun(rabbit_semi_durable_route, Route, write); + false -> ok end, ok = Fun(rabbit_route, Route, write), ok = Fun(rabbit_reverse_route, ReverseRoute, write), -- cgit v1.2.1 From b0cd16cc42cef2f0486132e3c544f4e4c260c772 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 11:40:17 +0100 Subject: Well it's almost hypnotic in its ugliness. Pending nicer fix from eldorado --- src/rabbit_misc.erl | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 9156d87e..32c4def0 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,8 +40,7 @@ -export([upmap/2, map_in_order/2]). -export([table_fold/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). --export([read_term_file/1, write_term_file/2]). --export([write_file/3, run_ok_monad/2]). +-export([read_term_file/1, write_term_file/2, write_file/3]). -export([append_file/2, ensure_parent_dirs_exist/1]). -export([format_stderr/2]). 
-export([start_applications/1, stop_applications/1]). @@ -63,7 +62,7 @@ -ifdef(use_specs). --export_type([resource_name/0, thunk/1, const/1, ok_monad_fun/0]). +-export_type([resource_name/0, thunk/1, const/1]). -type(ok_or_error() :: rabbit_types:ok_or_error(any())). -type(thunk(T) :: fun(() -> T)). @@ -77,8 +76,6 @@ fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])). -type(graph_edge_fun() :: fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])). --type(ok_monad_fun() :: - fun((any()) -> 'ok' | rabbit_types:ok_or_error2(any(), any()))). -spec(method_record_type/1 :: (rabbit_framing:amqp_method_record()) -> rabbit_framing:amqp_method_name()). @@ -158,8 +155,6 @@ (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())). -spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()). -spec(write_file/3 :: (file:filename(), boolean(), binary()) -> ok_or_error()). --spec(run_ok_monad/2 :: ([ok_monad_fun()], any()) -> - rabbit_types:ok_or_error(any())). -spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). @@ -527,22 +522,26 @@ write_file(Path, Append, Binary) when is_binary(Binary) -> true -> [read]; false -> [] end], - run_ok_monad( - [fun (ok) -> file:open(Path, Modes) end, - fun (Hdl) -> run_ok_monad( - [fun (ok) -> file:position(Hdl, eof) end, - fun (_Pos) -> file:write(Hdl, Binary) end, - fun (_Pos) -> file:sync(Hdl) end, - fun (_Pos) -> file:close(Hdl) end], ok) - end], ok). - -run_ok_monad([], _State) -> - ok; -run_ok_monad([Fun|Funs], State) -> - case Fun(State) of - ok -> run_ok_monad(Funs, State); - {ok, State1} -> run_ok_monad(Funs, State1); - {error, _Err} = Error -> Error + case file:open(Path, Modes) of + {ok, Hdl} -> + case file:position(Hdl, eof) of + {ok, _Pos} -> + case file:write(Hdl, Binary) of + ok -> + case file:sync(Hdl) of + ok -> + file:close(Hdl); + {error, _} = E4 -> + E4 + end; + {error, _} = E3 -> + E3 + end; + {error, _} = E2 -> + E2 + end; + {error, _} = E1 -> + E1 end. append_file(File, Suffix) -> -- cgit v1.2.1 From e6173368d7e70429cbfb8c6be9080383ba7ee6c2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 11:46:23 +0100 Subject: greater than --- src/rabbit_misc.erl | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 32c4def0..52292bb1 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -531,17 +531,13 @@ write_file(Path, Append, Binary) when is_binary(Binary) -> case file:sync(Hdl) of ok -> file:close(Hdl); - {error, _} = E4 -> - E4 + {error, _} = E4 -> E4 end; - {error, _} = E3 -> - E3 + {error, _} = E3 -> E3 end; - {error, _} = E2 -> - E2 + {error, _} = E2 -> E2 end; - {error, _} = E1 -> - E1 + {error, _} = E1 -> E1 end. 
append_file(File, Suffix) -> -- cgit v1.2.1 From cafa8b94c8916fb06d508208cb1965cb8d78bd5a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 11:47:06 +0100 Subject: rename --- src/rabbit_misc.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 52292bb1..6962317f 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -531,13 +531,13 @@ write_file(Path, Append, Binary) when is_binary(Binary) -> case file:sync(Hdl) of ok -> file:close(Hdl); - {error, _} = E4 -> E4 + {error, _} = E -> E end; - {error, _} = E3 -> E3 + {error, _} = E -> E end; - {error, _} = E2 -> E2 + {error, _} = E -> E end; - {error, _} = E1 -> E1 + {error, _} = E -> E end. append_file(File, Suffix) -> -- cgit v1.2.1 From e12c48c0a75969d873fd761d8f8672d5bf32517d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 12:22:42 +0100 Subject: Revert to trailing whitespace et al to reduce diff to Erlang's supervisor --- src/supervisor2.erl | 104 ++++++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 73316db9..19a95328 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -115,10 +115,10 @@ behaviour_info(_Other) -> %%% --------------------------------------------------- start_link(Mod, Args) -> gen_server:start_link(?MODULE, {self, Mod, Args}, []). - + start_link(SupName, Mod, Args) -> gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []). - + %%% --------------------------------------------------- %%% Interface functions. %%% --------------------------------------------------- @@ -161,9 +161,9 @@ delayed_restart(Supervisor, RestartDetails) -> gen_server:cast(Supervisor, {delayed_restart, RestartDetails}). %%% --------------------------------------------------- -%%% +%%% %%% Initialize the supervisor. -%%% +%%% %%% --------------------------------------------------- init({SupName, Mod, Args}) -> process_flag(trap_exit, true), @@ -182,7 +182,7 @@ init({SupName, Mod, Args}) -> Error -> {stop, {bad_return, {Mod, init, Error}}} end. - + init_children(State, StartSpec) -> SupName = State#state.name, case check_startspec(StartSpec) of @@ -212,7 +212,7 @@ init_dynamic(_State, StartSpec) -> %% Func: start_children/2 %% Args: Children = [#child] in start order %% SupName = {local, atom()} | {global, atom()} | {pid(),Mod} -%% Purpose: Start all children. The new list contains #child's +%% Purpose: Start all children. The new list contains #child's %% with pids. %% Returns: {ok, NChildren} | {error, NChildren} %% NChildren = [#child] in termination order (reversed @@ -244,7 +244,7 @@ do_start_child(SupName, Child) -> NChild = Child#child{pid = Pid}, report_progress(NChild, SupName), {ok, Pid, Extra}; - ignore -> + ignore -> {ok, undefined}; {error, What} -> {error, What}; What -> {error, What} @@ -263,23 +263,23 @@ do_start_child_i(M, F, A) -> What -> {error, What} end. - + %%% --------------------------------------------------- -%%% +%%% %%% Callback functions. 
-%%% +%%% %%% --------------------------------------------------- handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) -> #child{mfa = {M, F, A}} = hd(State#state.children), Args = A ++ EArgs, case do_start_child_i(M, F, Args) of {ok, Pid} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid}, NState}; {ok, Pid, Extra} -> - NState = State#state{dynamics = + NState = State#state{dynamics = ?DICT:store(Pid, Args, State#state.dynamics)}, {reply, {ok, Pid, Extra}, NState}; What -> @@ -372,7 +372,7 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) -> %%% Hopefully cause a function-clause as there is no API function %%% that utilizes cast. handle_cast(null, State) -> - error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", + error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n", []), {noreply, State}. @@ -389,7 +389,7 @@ handle_info({'EXIT', Pid, Reason}, State) -> end; handle_info(Msg, State) -> - error_logger:error_msg("Supervisor received unexpected message: ~p~n", + error_logger:error_msg("Supervisor received unexpected message: ~p~n", [Msg]), {noreply, State}. %% @@ -439,13 +439,13 @@ check_flags({Strategy, MaxIntensity, Period}) -> check_flags(What) -> {bad_flags, What}. -update_childspec(State, StartSpec) when ?is_simple(State) -> - case check_startspec(StartSpec) of - {ok, [Child]} -> - {ok, State#state{children = [Child]}}; - Error -> - {error, Error} - end; +update_childspec(State, StartSpec) when ?is_simple(State) -> + case check_startspec(StartSpec) of + {ok, [Child]} -> + {ok, State#state{children = [Child]}}; + Error -> + {error, Error} + end; update_childspec(State, StartSpec) -> case check_startspec(StartSpec) of @@ -466,7 +466,7 @@ update_childspec1([Child|OldC], Children, KeepOld) -> end; update_childspec1([], Children, KeepOld) -> % Return them in (keeped) reverse start order. - lists:reverse(Children ++ KeepOld). + lists:reverse(Children ++ KeepOld). update_chsp(OldCh, Children) -> case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name -> @@ -480,7 +480,7 @@ update_chsp(OldCh, Children) -> NewC -> {ok, NewC} end. - + %%% --------------------------------------------------- %%% Start a new child. %%% --------------------------------------------------- @@ -492,12 +492,12 @@ handle_start_child(Child, State) -> {ok, Pid} -> Children = State#state.children, {{ok, Pid}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {ok, Pid, Extra} -> Children = State#state.children, {{ok, Pid, Extra}, - State#state{children = + State#state{children = [Child#child{pid = Pid}|Children]}}; {error, What} -> {{error, {What, Child}}, State} @@ -681,17 +681,17 @@ do_terminate(Child, _SupName) -> Child. %%----------------------------------------------------------------- -%% Shutdowns a child. We must check the EXIT value +%% Shutdowns a child. We must check the EXIT value %% of the child, because it might have died with another reason than -%% the wanted. In that case we want to report the error. We put a -%% monitor on the child an check for the 'DOWN' message instead of -%% checking for the 'EXIT' message, because if we check the 'EXIT' -%% message a "naughty" child, who does unlink(Sup), could hang the -%% supervisor. +%% the wanted. In that case we want to report the error. 
We put a +%% monitor on the child an check for the 'DOWN' message instead of +%% checking for the 'EXIT' message, because if we check the 'EXIT' +%% message a "naughty" child, who does unlink(Sup), could hang the +%% supervisor. %% Returns: ok | {error, OtherReason} (this should be reported) %%----------------------------------------------------------------- shutdown(Pid, brutal_kill) -> - + case monitor_child(Pid) of ok -> exit(Pid, kill), @@ -701,16 +701,16 @@ shutdown(Pid, brutal_kill) -> {'DOWN', _MRef, process, Pid, OtherReason} -> {error, OtherReason} end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end; shutdown(Pid, Time) -> - + case monitor_child(Pid) of ok -> exit(Pid, shutdown), %% Try to shutdown gracefully - receive + receive {'DOWN', _MRef, process, Pid, shutdown} -> ok; {'DOWN', _MRef, process, Pid, OtherReason} -> @@ -722,14 +722,14 @@ shutdown(Pid, Time) -> {error, OtherReason} end end; - {error, Reason} -> + {error, Reason} -> {error, Reason} end. %% Help function to shutdown/2 switches from link to monitor approach monitor_child(Pid) -> - - %% Do the monitor operation first so that if the child dies + + %% Do the monitor operation first so that if the child dies %% before the monitoring is done causing a 'DOWN'-message with %% reason noproc, we will get the real reason in the 'EXIT'-message %% unless a naughty child has already done unlink... @@ -739,22 +739,22 @@ monitor_child(Pid) -> receive %% If the child dies before the unlik we must empty %% the mail-box of the 'EXIT'-message and the 'DOWN'-message. - {'EXIT', Pid, Reason} -> - receive + {'EXIT', Pid, Reason} -> + receive {'DOWN', _, process, Pid, _} -> {error, Reason} end - after 0 -> + after 0 -> %% If a naughty child did unlink and the child dies before - %% monitor the result will be that shutdown/2 receives a + %% monitor the result will be that shutdown/2 receives a %% 'DOWN'-message with reason noproc. %% If the child should die after the unlink there %% will be a 'DOWN'-message with a correct reason - %% that will be handled in shutdown/2. - ok + %% that will be handled in shutdown/2. + ok end. - - + + %%----------------------------------------------------------------- %% Child/State manipulating functions. %%----------------------------------------------------------------- @@ -808,7 +808,7 @@ remove_child(Child, State) -> %% Args: SupName = {local, atom()} | {global, atom()} | self %% Type = {Strategy, MaxIntensity, Period} %% Strategy = one_for_one | one_for_all | simple_one_for_one | -%% rest_for_one +%% rest_for_one %% MaxIntensity = integer() %% Period = integer() %% Mod :== atom() @@ -902,10 +902,10 @@ validChildType(supervisor) -> true; validChildType(worker) -> true; validChildType(What) -> throw({invalid_child_type, What}). -validName(_Name) -> true. +validName(_Name) -> true. -validFunc({M, F, A}) when is_atom(M), - is_atom(F), +validFunc({M, F, A}) when is_atom(M), + is_atom(F), is_list(A) -> true; validFunc(Func) -> throw({invalid_mfa, Func}). @@ -922,7 +922,7 @@ validDelay(Delay) when is_number(Delay), Delay >= 0 -> true; validDelay(What) -> throw({invalid_delay, What}). -validShutdown(Shutdown, _) +validShutdown(Shutdown, _) when is_integer(Shutdown), Shutdown > 0 -> true; validShutdown(infinity, supervisor) -> true; validShutdown(brutal_kill, _) -> true; @@ -948,7 +948,7 @@ validMods(Mods) -> throw({invalid_modules, Mods}). 
%%% Returns: {ok, State'} | {terminate, State'} %%% ------------------------------------------------------ -add_restart(State) -> +add_restart(State) -> I = State#state.intensity, P = State#state.period, R = State#state.restarts, -- cgit v1.2.1 From b2da2bb6b0f82eeb3d090c39fed2894f165d51da Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 12:25:20 +0100 Subject: Oops, missed this one. --- src/rabbit.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index fe392c5f..7942962c 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -485,7 +485,11 @@ recovery_callbacks(RecXBs, NoRecXBs) -> end, rabbit_misc:execute_mnesia_transaction( fun () -> ok end, - fun (ok, Tx) -> + fun (ok, Tx0) -> + Tx = case Tx0 of + true -> transaction; + false -> none + end, CB(Tx, start, RecXBs), CB(Tx, add_bindings, NoRecXBs) end), -- cgit v1.2.1 From 4c5f452226bad9d82ab3ad387d18a89262964307 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 1 Apr 2011 13:09:50 +0100 Subject: Unlink before exit --- src/test_sup.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test_sup.erl b/src/test_sup.erl index 5fc0eac0..150235da 100644 --- a/src/test_sup.erl +++ b/src/test_sup.erl @@ -45,8 +45,8 @@ test_supervisor_delayed_restart(SupPid) -> with_sup(RestartStrategy, Fun) -> {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]), Res = Fun(SupPid), - exit(SupPid, shutdown), unlink(SupPid), + exit(SupPid, shutdown), Res. init([RestartStrategy]) -> -- cgit v1.2.1 From 493f98d8b6fe5d85223e754b1c05b73903490857 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 1 Apr 2011 13:19:31 +0100 Subject: 'shutdown' is only treated specially if the child is a supervisor (previous behaviour was actually confused on this front). Some minor renaming, reordering, and inlining --- src/supervisor2.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/supervisor2.erl b/src/supervisor2.erl index 19a95328..ec1ee9cd 100644 --- a/src/supervisor2.erl +++ b/src/supervisor2.erl @@ -38,7 +38,7 @@ %% child is a supervisor and it exits normally (i.e. with reason of %% 'shutdown') then the child's parent also exits normally. %% -%% 5) normal, shutdown and {shutdown, _} exit reasons are all treated the same +%% 5) normal, and {shutdown, _} exit reasons are all treated the same %% (i.e. are regarded as normal exits) %% %% All modifications are (C) 2010-2011 VMware, Inc. @@ -548,11 +548,11 @@ do_restart(permanent, Reason, Child, State) -> report_error(child_terminated, Reason, Child, State#state.name), restart(Child, State); do_restart(Type, normal, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); -do_restart(Type, shutdown, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); + del_child_and_maybe_shutdown(Type, Child, State); do_restart(Type, {shutdown, _}, Child, State) -> - normal_or_shutdown_restart(Type, Child, State); + del_child_and_maybe_shutdown(Type, Child, State); +do_restart(Type, shutdown, Child = #child{child_type = supervisor}, State) -> + del_child_and_maybe_shutdown(Type, Child, State); do_restart(Type, Reason, Child, State) when Type =:= transient orelse Type =:= intrinsic -> report_error(child_terminated, Reason, Child, State#state.name), @@ -562,11 +562,10 @@ do_restart(temporary, Reason, Child, State) -> NState = state_del_child(Child, State), {ok, NState}. 
-normal_or_shutdown_restart(intrinsic, Child, State) -> +del_child_and_maybe_shutdown(intrinsic, Child, State) -> {shutdown, state_del_child(Child, State)}; -normal_or_shutdown_restart(_, Child, State) -> - NState = state_del_child(Child, State), - {ok, NState}. +del_child_and_maybe_shutdown(_, Child, State) -> + {ok, state_del_child(Child, State)}. restart(Child, State) -> case add_restart(State) of -- cgit v1.2.1 From cde255cac929da94d8722ca901b0b65876fe72f3 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 1 Apr 2011 14:27:29 +0100 Subject: use mnesia:read/1 instead of read/3 --- src/gm.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index aa5ba146..8b7dc70c 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -1011,7 +1011,7 @@ prune_or_create_group(Self, GroupName) -> fun () -> GroupNew = #gm_group { name = GroupName, members = [Self], version = 0 }, - case mnesia:read(?GROUP_TABLE, GroupName, read) of + case mnesia:read({?GROUP_TABLE, GroupName}) of [] -> mnesia:write(GroupNew), GroupNew; @@ -1029,7 +1029,7 @@ record_dead_member_in_group(Member, GroupName) -> {atomic, Group} = mnesia:sync_transaction( fun () -> [Group1 = #gm_group { members = Members, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), case lists:splitwith( fun (Member1) -> Member1 =/= Member end, Members) of {_Members1, []} -> %% not found - already recorded dead @@ -1049,7 +1049,7 @@ record_new_member_in_group(GroupName, Left, NewMember, Fun) -> mnesia:sync_transaction( fun () -> [#gm_group { members = Members, version = Ver } = Group1] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), {Prefix, [Left | Suffix]} = lists:splitwith(fun (M) -> M =/= Left end, Members), Members1 = Prefix ++ [Left, NewMember | Suffix], @@ -1068,7 +1068,7 @@ erase_members_in_group(Members, GroupName) -> fun () -> [Group1 = #gm_group { members = [_|_] = Members1, version = Ver }] = - mnesia:read(?GROUP_TABLE, GroupName, read), + mnesia:read({?GROUP_TABLE, GroupName}), case Members1 -- DeadMembers of Members1 -> Group1; Members2 -> Group2 = -- cgit v1.2.1 From c4098939f59b526754247b708f87e1760cea1c4d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 14:08:39 +0100 Subject: Another read/1. --- src/rabbit_exchange_type_topic.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index a61e380b..c192f8cf 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -188,10 +188,10 @@ follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) -> end. trie_child(X, Node, Word) -> - case mnesia:read(rabbit_topic_trie_edge, - #trie_edge{exchange_name = X, - node_id = Node, - word = Word}, read) of + case mnesia:read({rabbit_topic_trie_edge, + #trie_edge{exchange_name = X, + node_id = Node, + word = Word}}) of [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode}; [] -> error end. -- cgit v1.2.1 From c9990f38f58ed8101d83e6e5c527275761aa7a1a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 16:48:09 +0100 Subject: QAish updates. 
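For orientation, the exchange type behaviour after this change reduces to the callback set sketched below. This is an illustrative example only, modelled on the rabbit_exchange_type_direct/fanout hunks further down; the module name and description text are invented, and the boot-step/registry registration that real type modules carry is omitted. The only signature change relative to the old behaviour is create/2 replacing start/3, which no longer receives a binding list.

    -module(rabbit_exchange_type_noop).          %% hypothetical example module
    -behaviour(rabbit_exchange_type).

    -export([description/0, route/2]).
    -export([validate/1, create/2, delete/3, add_bindings/3,
             remove_bindings/3, assert_args_equivalence/2]).

    description() ->
        [{name, <<"x-noop">>},
         {description, <<"illustrative exchange type that routes nowhere">>}].

    route(_X, _Delivery) -> [].                  %% no destinations

    validate(_X)                  -> ok.
    create(_Tx, _X)               -> ok.         %% was start(Tx, X, Bs)
    delete(_Tx, _X, _Bs)          -> ok.
    add_bindings(_Tx, _X, _Bs)    -> ok.
    remove_bindings(_Tx, _X, _Bs) -> ok.
    assert_args_equivalence(X, Args) ->
        rabbit_exchange:assert_args_equivalence(X, Args).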
--- include/rabbit_exchange_type_spec.hrl | 3 +- src/rabbit.erl | 33 ++-------------------- src/rabbit_amqqueue.erl | 11 ++++---- src/rabbit_binding.erl | 53 +++++++++++++++++++++-------------- src/rabbit_exchange.erl | 26 ++++++++++------- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_direct.erl | 4 +-- src/rabbit_exchange_type_fanout.erl | 4 +-- src/rabbit_exchange_type_headers.erl | 4 +-- src/rabbit_exchange_type_topic.erl | 9 ++---- 10 files changed, 67 insertions(+), 82 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index 8163b6f2..fd3ddf7e 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -20,8 +20,7 @@ -spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) -> rabbit_router:match_result()). -spec(validate/1 :: (rabbit_types:exchange()) -> 'ok'). --spec(start/3 :: (boolean(), rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), diff --git a/src/rabbit.erl b/src/rabbit.erl index fe392c5f..2840a5b7 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -124,7 +124,7 @@ {enables, routing_ready}]}). -rabbit_boot_step({recovery, - [{description, "exchange / queue recovery"}, + [{description, "exchange, queue and binding recovery"}, {mfa, {rabbit, recover, []}}, {requires, empty_db_check}, {enables, routing_ready}]}). @@ -461,35 +461,8 @@ boot_delegate() -> recover() -> XNames = rabbit_exchange:recover(), - QNames = rabbit_amqqueue:start(), - Bs = rabbit_binding:recover(XNames, QNames), - {RecXBs, NoRecXBs} = filter_recovered_exchanges(XNames, Bs), - ok = recovery_callbacks(RecXBs, NoRecXBs). - -filter_recovered_exchanges(Xs, Bs) -> - RecXs = sets:from_list(Xs), - lists:foldl( - fun (B = #binding{source = Src}, {RecXBs, NoRecXBs}) -> - case sets:is_element(Src, RecXs) of - true -> {dict:append(Src, B, RecXBs), NoRecXBs}; - false -> {RecXBs, dict:append(Src, B, NoRecXBs)} - end - end, {dict:new(), dict:new()}, Bs). - -recovery_callbacks(RecXBs, NoRecXBs) -> - CB = fun (Tx, F, XBs) -> - dict:map(fun (XName, Bs) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, F, [Tx, X, Bs]) - end, XBs) - end, - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> - CB(Tx, start, RecXBs), - CB(Tx, add_bindings, NoRecXBs) - end), - ok. + QNames = rabbit_amqqueue:recover(), + rabbit_binding:recover(XNames, QNames). maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 6267b823..34ed88bc 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,7 +16,8 @@ -module(rabbit_amqqueue). --export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). +-export([recover/0, stop/0, declare/5, delete_immediately/1, delete/3, + purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, @@ -57,7 +58,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> [rabbit_types:amqqueue()]). +-spec(recover/0 :: () -> [rabbit_types:amqqueue()]). -spec(stop/0 :: () -> 'ok'). 
-spec(declare/5 :: (name(), boolean(), boolean(), @@ -157,7 +158,7 @@ %%---------------------------------------------------------------------------- -start() -> +recover() -> DurableQueues = find_durable_queues(), {ok, BQ} = application:get_env(rabbit, backing_queue_module), ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]), @@ -186,8 +187,8 @@ find_durable_queues() -> recover_durable_queues(DurableQueues) -> Qs = [start_queue_process(Q) || Q <- DurableQueues], - [Q#amqqueue.name || Q <- Qs, - gen_server2:call(Q#amqqueue.pid, {init, true}, infinity) == {new, Q}]. + [QName || Q = #amqqueue{name = QName, pid = Pid} <- Qs, + gen_server2:call(Pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 47793920..5ac9c871 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -50,8 +50,8 @@ -opaque(deletions() :: dict()). --spec(recover/2 :: ([rabbit_types:exchange()], [rabbit_types:amqqueue()]) -> - [rabbit_types:binding()]). +-spec(recover/2 :: ([rabbit_types:resource()], [rabbit_types:resource()]) -> + 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). -spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). @@ -94,27 +94,38 @@ destination_name, destination_kind, routing_key, arguments]). -recover(XsL, QsL) -> - Xs = sets:from_list(XsL), - Qs = sets:from_list(QsL), - rabbit_misc:table_fold( - fun (Route = #route{binding = B}, Acc) -> - case should_recover(B, Xs, Qs) of - true -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, write), - [B | Acc]; - false -> Acc - end - end, [], rabbit_durable_route). +recover(XNames, QNames) -> + XNameSet = sets:from_list(XNames), + QNameSet = sets:from_list(QNames), + XBs = rabbit_misc:table_fold( + fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> {_, Rev} = route_with_reverse(Route), + ok = mnesia:write(rabbit_route, Route, write), + ok = mnesia:write(rabbit_reverse_route, Rev, + write), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), rabbit_durable_route), + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + dict:map(fun (XName, Bindings) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bindings]) + end, XBs) + end), + ok. -should_recover(B = #binding{destination = Dest = #resource{ kind = Kind }}, - XNames, QNames) -> +should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, + XNameSet, QNameSet) -> case mnesia:read({rabbit_route, B}) of - [] -> sets:is_element(Dest, case Kind of - exchange -> XNames; - queue -> QNames - end); + [] -> sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end); _ -> false end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index e05a8812..7268b15d 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> 'ok'). +-spec(recover/0 :: () -> [rabbit_types:resource()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). 
-spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), @@ -83,14 +83,20 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - [XName | Acc]; - [_] -> Acc - end - end, [], rabbit_durable_exchange). + Xs = rabbit_misc:table_fold( + fun (X = #exchange{name = XName}, Acc) -> + case mnesia:read({rabbit_exchange, XName}) of + [] -> ok = mnesia:write(rabbit_exchange, X, write), + [X | Acc]; + [_] -> Acc + end + end, [], rabbit_durable_exchange), + rabbit_misc:execute_mnesia_transaction( + fun () -> ok end, + fun (ok, Tx) -> + [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] + end), + [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> apply(type_to_module(XType), Fun, Args). @@ -120,7 +126,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - ok = (type_to_module(Type)):start(Tx, Exchange, []), + ok = (type_to_module(Type)):create(Tx, Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index ad08eb86..0fede0be 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -27,7 +27,7 @@ behaviour_info(callbacks) -> {validate, 1}, %% called after declaration and recovery - {start, 3}, + {create, 2}, %% called after exchange (auto)deletion. {delete, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 1658c9f8..200c2997 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -40,7 +40,7 @@ route(#exchange{name = Name}, rabbit_router:match_routing_key(Name, Routes). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 83afdd71..62568949 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -39,7 +39,7 @@ route(#exchange{name = Name}, _Delivery) -> rabbit_router:match_routing_key(Name, ['_']). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 0fe8404f..258e785a 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). 
--export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -113,7 +113,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind). validate(_X) -> ok. -start(_Tx, _X, _Bs) -> ok. +create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 52f468ee..efa5fb52 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, start/3, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -48,12 +48,7 @@ route(#exchange{name = X}, validate(_X) -> ok. -start(true, _X, Bs) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end); -start(false, _X, _Bs) -> +create(_Tx, _X) -> ok. delete(true, #exchange{name = X}, _Bs) -> -- cgit v1.2.1 From a958149498eb8822bfec1c21414bf64c7abe8517 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 16:55:45 +0100 Subject: Fix tests --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 89d0d162..c8ef4105 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - rabbit_amqqueue:start(), + rabbit_amqqueue:recover(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From 1e9dfa0147c3e4526afdf5ceff58e3ee8ea293ed Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 1 Apr 2011 17:04:40 +0100 Subject: Fix tests again. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index c8ef4105..995b84d9 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -595,7 +595,7 @@ test_topic_matching() -> auto_delete = false, arguments = []}, %% create rabbit_exchange_type_topic:validate(X), - exchange_op_callback(X, start, [[]]), + exchange_op_callback(X, create, []), %% add some bindings Bindings = [#binding{source = XName, -- cgit v1.2.1 From 0cde3dca66d24578464ac64b233737962232b6db Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 12:10:06 +0100 Subject: Describe the problem. That's half the battle, right? --- src/rabbit_ssl.erl | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 4 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index 1953b6b8..e03f8d10 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -89,8 +89,10 @@ find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} <- lists:flatten(RDNs), T == Type] of - [{printableString, S}] -> S; - [] -> not_found + [{ST, S}] when ST =:= teletexString; ST =:= printableString; + ST =:= universalString; ST =:= utf8String; + ST =:= bmpString -> format_directory_string(ST, S); + [] -> not_found end. 
%%-------------------------------------------------------------------------- @@ -162,8 +164,8 @@ escape_rdn_value([C | S], middle) -> format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; ST =:= universalString; ST =:= utf8String; ST =:= bmpString -> - if is_binary(S) -> binary_to_list(S); - true -> S + if is_binary(S) -> format_directory_string(ST, binary_to_list(S)); + true -> format_directory_string(ST, S) end; format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]}) -> @@ -171,3 +173,56 @@ format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]); format_asn1_value(V) -> io_lib:format("~p", [V]). + +%% DirectoryString { INTEGER : maxSize } ::= CHOICE { +%% teletexString TeletexString (SIZE (1..maxSize)), +%% printableString PrintableString (SIZE (1..maxSize)), +%% bmpString BMPString (SIZE (1..maxSize)), +%% universalString UniversalString (SIZE (1..maxSize)), +%% uTF8String UTF8String (SIZE (1..maxSize)) } +%% +%% Precise definitions of printable / teletexString are hard to come +%% by. This is what I reconstructed: +%% +%% printableString: +%% "intended to represent the limited character sets available to +%% mainframe input terminals" +%% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx +%% +%% teletexString: +%% "a sizable volume of software in the world treats TeletexString +%% (T61String) as a simple 8-bit string with mostly Windows Latin 1 +%% (superset of iso-8859-1) encoding" +%% http://www.mail-archive.com/asn1@asn1.org/msg00460.html +%% (however according to that link X.680 actually defines +%% TeletexString in some much more invovled and crazy way. I suggest +%% we treat it as Windows CP1252). +%% +%% bmpString: +%% UCS-2 according to RFC 3641. Hence cannot represent unicode characters +%% above 65535. +%% +%% universalString: +%% UCS-4 according to RFC 3641. +%% +%% utf8String: +%% UTF-8 according to RFC 3641. +%% +%% Within Rabbit we assume UTF-8 encoding. Since printableString is a +%% subset of ASCII it is also a subset of UTF-8. The others need +%% converting. +%% +%% Note for testing: the default Ubuntu configuration for openssl will +%% only create printableString or teletexString types no matter what +%% you do. Edit string_mask in the [req] section of +%% /etc/ssl/openssl.cnf to change this (see comments there). You +%% probably also need to set utf8 = yes to get it to accept UTF-8 on +%% the command line. +%% +%% TODO actually convert stuff here. + +format_directory_string(printableString, S) -> S; +format_directory_string(teletexString, S) -> S; +format_directory_string(bmpString, S) -> S; +format_directory_string(universalString, S) -> S; +format_directory_string(utf8String, S) -> S. -- cgit v1.2.1 From c2eb57a92430c598db99dab8aea83b30ebee9488 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 12:11:48 +0100 Subject: Tweak comment after rereading it. --- src/rabbit_ssl.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index e03f8d10..821dde99 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -187,6 +187,7 @@ format_asn1_value(V) -> %% printableString: %% "intended to represent the limited character sets available to %% mainframe input terminals" +%% A-Z a-z 0-9 ' ( ) + , - . / : = ? 
[space] %% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx %% %% teletexString: @@ -194,8 +195,9 @@ format_asn1_value(V) -> %% (T61String) as a simple 8-bit string with mostly Windows Latin 1 %% (superset of iso-8859-1) encoding" %% http://www.mail-archive.com/asn1@asn1.org/msg00460.html -%% (however according to that link X.680 actually defines -%% TeletexString in some much more invovled and crazy way. I suggest +%% +%% (However according to that link X.680 actually defines +%% TeletexString in some much more involved and crazy way. I suggest %% we treat it as Windows CP1252). %% %% bmpString: -- cgit v1.2.1 From 487bfdc364ba842c1591bb80b16e7fe0669c9e02 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 4 Apr 2011 13:38:41 +0100 Subject: Warn about config only if new config is absent --- scripts/rabbitmq-env | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env index 3e173949..a2ef8d3c 100755 --- a/scripts/rabbitmq-env +++ b/scripts/rabbitmq-env @@ -37,7 +37,8 @@ RABBITMQ_HOME="${SCRIPT_DIR}/.." NODENAME=rabbit@${HOSTNAME%%.*} # Load configuration from the rabbitmq.conf file -if [ -f /etc/rabbitmq/rabbitmq.conf ]; then +if [ -f /etc/rabbitmq/rabbitmq.conf ] && \ + [ ! -f /etc/rabbitmq/rabbitmq-env.conf ] ; then echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- " echo "location has moved to /etc/rabbitmq/rabbitmq-env.conf" fi -- cgit v1.2.1 From 27a5c6a996bd6b9ea3f9aa39a7868c0af15eb2e6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 14:07:34 +0100 Subject: Actually DTRT. Note that this knocks out the is_binary/1 check introduced in 22bf9ebcaebf; however despite that changeset's comment this does not seem to be needed with R14B (or any other release I tested). --- src/rabbit_ssl.erl | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index 821dde99..cd0d1a92 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -164,9 +164,7 @@ escape_rdn_value([C | S], middle) -> format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString; ST =:= universalString; ST =:= utf8String; ST =:= bmpString -> - if is_binary(S) -> format_directory_string(ST, binary_to_list(S)); - true -> format_directory_string(ST, S) - end; + format_directory_string(ST, S); format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]}) -> io_lib:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ", @@ -198,11 +196,12 @@ format_asn1_value(V) -> %% %% (However according to that link X.680 actually defines %% TeletexString in some much more involved and crazy way. I suggest -%% we treat it as Windows CP1252). +%% we treat it as ISO-8859-1 since Erlang does not support Windows +%% Latin 1). %% %% bmpString: -%% UCS-2 according to RFC 3641. Hence cannot represent unicode characters -%% above 65535. +%% UCS-2 according to RFC 3641. Hence cannot represent Unicode +%% characters above 65535 (outside the "Basic Multilingual Plane"). %% %% universalString: %% UCS-4 according to RFC 3641. @@ -212,19 +211,37 @@ format_asn1_value(V) -> %% %% Within Rabbit we assume UTF-8 encoding. Since printableString is a %% subset of ASCII it is also a subset of UTF-8. The others need -%% converting. +%% converting. Fortunately since the Erlang SSL library does the +%% decoding for us (albeit into a weird format, see below), we just +%% need to handle encoding into UTF-8. 
%% %% Note for testing: the default Ubuntu configuration for openssl will %% only create printableString or teletexString types no matter what %% you do. Edit string_mask in the [req] section of %% /etc/ssl/openssl.cnf to change this (see comments there). You %% probably also need to set utf8 = yes to get it to accept UTF-8 on -%% the command line. -%% -%% TODO actually convert stuff here. +%% the command line. Also note I could not get openssl to generate a +%% universalString. format_directory_string(printableString, S) -> S; -format_directory_string(teletexString, S) -> S; -format_directory_string(bmpString, S) -> S; -format_directory_string(universalString, S) -> S; +format_directory_string(teletexString, S) -> utf8_list_from(S); +format_directory_string(bmpString, S) -> utf8_list_from(S); +format_directory_string(universalString, S) -> utf8_list_from(S); format_directory_string(utf8String, S) -> S. + +utf8_list_from(S) -> + binary_to_list( + unicode:characters_to_binary(flatten_ssl_list(S), utf32, utf8)). + +%% The Erlang SSL implementation invents its own representation for +%% non-ascii strings - looking like [97,{0,0,3,187}] (that's LATIN +%% SMALL LETTER A followed by GREEK SMALL LETTER LAMDA). We convert +%% this into a list of unicode characters, which we can tell +%% unicode:characters_to_binary is utf32. + +flatten_ssl_list(L) -> [flatten_ssl_list_item(I) || I <- L]. + +flatten_ssl_list_item({A, B, C, D}) -> + A * (1 bsl 24) + B * (1 bsl 16) + C * (1 bsl 8) + D; +flatten_ssl_list_item(N) when is_number (N) -> + N. -- cgit v1.2.1 From 2b965cec2671c62be3994bfb013e6eec0a7caac2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 14:18:26 +0100 Subject: Reduce duplication. --- src/rabbit_ssl.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index cd0d1a92..a3cd2b37 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -89,10 +89,8 @@ find_by_type(Type, {rdnSequence, RDNs}) -> case [V || #'AttributeTypeAndValue'{type = T, value = V} <- lists:flatten(RDNs), T == Type] of - [{ST, S}] when ST =:= teletexString; ST =:= printableString; - ST =:= universalString; ST =:= utf8String; - ST =:= bmpString -> format_directory_string(ST, S); - [] -> not_found + [Val] -> format_asn1_value(Val); + [] -> not_found end. %%-------------------------------------------------------------------------- -- cgit v1.2.1 From 4776943d229918e892c4a3205671d2fc8b2a150f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:14:28 +0100 Subject: Cosmetics / specs. --- src/rabbit.erl | 4 +--- src/rabbit_amqqueue.erl | 6 +++--- src/rabbit_binding.erl | 2 +- src/rabbit_exchange.erl | 2 +- src/rabbit_exchange_type_direct.erl | 2 +- src/rabbit_exchange_type_topic.erl | 4 +--- src/rabbit_tests.erl | 2 +- 7 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 2840a5b7..07316138 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -460,9 +460,7 @@ boot_delegate() -> rabbit_sup:start_child(delegate_sup, [Count]). recover() -> - XNames = rabbit_exchange:recover(), - QNames = rabbit_amqqueue:recover(), - rabbit_binding:recover(XNames, QNames). + rabbit_binding:recover(rabbit_exchange:recover(), rabbit_amqqueue:start()). 
maybe_insert_default_data() -> case rabbit_mnesia:is_db_empty() of diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 34ed88bc..e813d75c 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,7 +16,7 @@ -module(rabbit_amqqueue). --export([recover/0, stop/0, declare/5, delete_immediately/1, delete/3, +-export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, @@ -58,7 +58,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(recover/0 :: () -> [rabbit_types:amqqueue()]). +-spec(start/0 :: () -> [rabbit_amqqueue:name()]). -spec(stop/0 :: () -> 'ok'). -spec(declare/5 :: (name(), boolean(), boolean(), @@ -158,7 +158,7 @@ %%---------------------------------------------------------------------------- -recover() -> +start() -> DurableQueues = find_durable_queues(), {ok, BQ} = application:get_env(rabbit, backing_queue_module), ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 5ac9c871..5f120547 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -50,7 +50,7 @@ -opaque(deletions() :: dict()). --spec(recover/2 :: ([rabbit_types:resource()], [rabbit_types:resource()]) -> +-spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). -spec(add/1 :: (rabbit_types:binding()) -> add_res()). diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 7268b15d..86ce69ef 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> [rabbit_types:resource()]). +-spec(recover/0 :: () -> [rabbit_exchange:name()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 200c2997..4c56a1f8 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -42,7 +42,7 @@ route(#exchange{name = Name}, validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _B) -> ok. +add_bindings(_Tx, _X, _Bs) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index efa5fb52..2c995df8 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -47,9 +47,7 @@ route(#exchange{name = X}, end || RKey <- Routes]). validate(_X) -> ok. - -create(_Tx, _X) -> - ok. +create(_Tx, _X) -> ok. delete(true, #exchange{name = X}, _Bs) -> trie_remove_all_edges(X), diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 995b84d9..e618156b 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2322,7 +2322,7 @@ test_queue_recover() -> after 10000 -> exit(timeout_waiting_for_queue_death) end, rabbit_amqqueue:stop(), - rabbit_amqqueue:recover(), + rabbit_amqqueue:start(), rabbit_amqqueue:with_or_die( QName, fun (Q1 = #amqqueue { pid = QPid1 }) -> -- cgit v1.2.1 From dac6e4ba2648131f4d79f871536c3b79e16d17d9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:24:02 +0100 Subject: Split up sync_binding. 
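A rough sketch of how the two halves are meant to be called after this split; the wrapper names below are hypothetical and both must run inside an Mnesia transaction, but the table writes mirror the code in the diff:

    %% Normal bind path: the rabbit_durable_route row is written only for a
    %% durable binding; the transient route pair is always written.
    add_binding_rows(Binding, Durable) ->
        sync_binding(Binding, Durable, fun mnesia:write/3).

    %% Recovery path: the rabbit_durable_route row already exists, so only the
    %% rabbit_route / rabbit_reverse_route pair needs re-creating.
    recover_binding_rows(Binding) ->
        sync_transient_binding(Binding, fun mnesia:write/3).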
--- src/rabbit_binding.erl | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 5f120547..508d19bf 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -100,10 +100,8 @@ recover(XNames, QNames) -> XBs = rabbit_misc:table_fold( fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> case should_recover(B, XNameSet, QNameSet) of - true -> {_, Rev} = route_with_reverse(Route), - ok = mnesia:write(rabbit_route, Route, write), - ok = mnesia:write(rabbit_reverse_route, Rev, - write), + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), rabbit_misc:dict_cons(Src, B, Acc); false -> Acc end @@ -287,16 +285,17 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -sync_binding(Binding, Durable, Fun) -> - ok = case Durable of - true -> Fun(rabbit_durable_route, - #route{binding = Binding}, write); - false -> ok - end, +sync_binding(Binding, true, Fun) -> + ok = Fun(rabbit_durable_route, #route{binding = Binding}, write), + ok = sync_transient_binding(Binding, Fun); + +sync_binding(Binding, false, Fun) -> + ok = sync_transient_binding(Binding, Fun). + +sync_transient_binding(Binding, Fun) -> {Route, ReverseRoute} = route_with_reverse(Binding), ok = Fun(rabbit_route, Route, write), - ok = Fun(rabbit_reverse_route, ReverseRoute, write), - ok. + ok = Fun(rabbit_reverse_route, ReverseRoute, write). call_with_source_and_destination(SrcName, DstName, Fun) -> SrcTable = table_for_resource(SrcName), -- cgit v1.2.1 From 9f9c7c5eabf3d6915afb0b0b62fe25d04101d50e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 16:35:51 +0100 Subject: rabbit_misc:execute_pre_post_mnesia_tx/1. --- src/rabbit_binding.erl | 5 ++--- src/rabbit_exchange.erl | 5 ++--- src/rabbit_misc.erl | 7 +++++++ 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 508d19bf..8c6732f9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -106,9 +106,8 @@ recover(XNames, QNames) -> false -> Acc end end, dict:new(), rabbit_durable_route), - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 86ce69ef..b39fe32c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,9 +91,8 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), - rabbit_misc:execute_mnesia_transaction( - fun () -> ok end, - fun (ok, Tx) -> + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] end), [XName || #exchange{name = XName} <- Xs]. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 2e9563cf..8927020f 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -34,6 +34,7 @@ -export([with_user/2, with_user_and_vhost/3]). -export([execute_mnesia_transaction/1]). -export([execute_mnesia_transaction/2]). +-export([execute_pre_post_mnesia_tx/1]). -export([execute_mnesia_tx_with_tail/1]). -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). @@ -135,6 +136,7 @@ -spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A). -spec(execute_mnesia_transaction/2 :: (thunk(A), fun ((A, boolean()) -> B)) -> B). 
+-spec(execute_pre_post_mnesia_tx/1 :: (fun ((boolean()) -> B)) -> B). -spec(execute_mnesia_tx_with_tail/1 :: (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))). -spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok'). @@ -404,6 +406,11 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> Result end), false). +%% Like the above, but without the main body. +execute_pre_post_mnesia_tx(PrePostCommitFun) -> + execute_mnesia_transaction(fun () -> ok end, + fun (ok, Tx) -> PrePostCommitFun(Tx) end). + %% Like execute_mnesia_transaction/2, but TxFun is expected to return a %% TailFun which gets called immediately before and after the tx commit execute_mnesia_tx_with_tail(TxFun) -> -- cgit v1.2.1 From 95f132f8a4324906a108208c37b720c8e811206d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 4 Apr 2011 17:07:12 +0100 Subject: Invoke all the create and add_binding callbacks in the same tx. --- src/rabbit_binding.erl | 4 ++++ src/rabbit_exchange.erl | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8c6732f9..557a8f29 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -108,6 +108,10 @@ recover(XNames, QNames) -> end, dict:new(), rabbit_durable_route), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> + [begin + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, create, [Tx, X]) + end|| XName <- XNames], dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b39fe32c..acbc6c90 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,10 +91,6 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] - end), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> -- cgit v1.2.1 From 797571fdd089c1217c8ff12fbf99990038b4dc3a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 11:49:07 +0100 Subject: Remove gratuitous export. --- src/rabbit_prelaunch.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index c8ad7c9c..79deb46c 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -16,7 +16,7 @@ -module(rabbit_prelaunch). --export([start/0, stop/0, duplicate_node_check/1]). +-export([start/0, stop/0]). -define(BaseApps, [rabbit]). -define(ERROR_CODE, 1). 
-- cgit v1.2.1 From 6bfb13e3561aaf85d2febb60fea3e89f440a3580 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 12:09:13 +0100 Subject: Revert bc5bcde98866 --- src/rabbit_binding.erl | 4 ---- src/rabbit_exchange.erl | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 557a8f29..8c6732f9 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -108,10 +108,6 @@ recover(XNames, QNames) -> end, dict:new(), rabbit_durable_route), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> - [begin - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, create, [Tx, X]) - end|| XName <- XNames], dict:map(fun (XName, Bindings) -> {ok, X} = rabbit_exchange:lookup(XName), rabbit_exchange:callback(X, add_bindings, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index acbc6c90..b39fe32c 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,6 +91,10 @@ recover() -> [_] -> Acc end end, [], rabbit_durable_exchange), + rabbit_misc:execute_pre_post_mnesia_tx( + fun (Tx) -> + [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] + end), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> -- cgit v1.2.1 From 050dc7df7081db4054191503e6b2dfe2e07ec901 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 12:24:13 +0100 Subject: Recover all the bindings in a single tx. --- src/rabbit_binding.erl | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8c6732f9..563fc0cf 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,15 +97,20 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - XBs = rabbit_misc:table_fold( - fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> - case should_recover(B, XNameSet, QNameSet) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc - end - end, dict:new(), rabbit_durable_route), + XBs = rabbit_misc:execute_mnesia_transaction( + fun () -> + lists:foldl( + fun (Route = #route{ + binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), + mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])) + end), rabbit_misc:execute_pre_post_mnesia_tx( fun (Tx) -> dict:map(fun (XName, Bindings) -> -- cgit v1.2.1 From b010416a0a2ebf15cae4d4da211486a12f80b2aa Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 13:27:17 +0100 Subject: Recover and invoke callbacks for bindings in the same Tx. --- src/rabbit_binding.erl | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 563fc0cf..7c492778 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -95,32 +95,29 @@ routing_key, arguments]). 
recover(XNames, QNames) -> - XNameSet = sets:from_list(XNames), - QNameSet = sets:from_list(QNames), XBs = rabbit_misc:execute_mnesia_transaction( fun () -> - lists:foldl( - fun (Route = #route{ - binding = B = #binding{source = Src}}, Acc) -> - case should_recover(B, XNameSet, QNameSet) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc - end - end, dict:new(), - mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])) + XBs = recover_internal(XNames, QNames), + callback_bindings(true, XBs), + XBs end), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - dict:map(fun (XName, Bindings) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bindings]) - end, XBs) - end), + callback_bindings(false, XBs), ok. +recover_internal(XNames, QNames) -> + XNameSet = sets:from_list(XNames), + QNameSet = sets:from_list(QNames), + lists:foldl( + fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + case should_recover(B, XNameSet, QNameSet) of + true -> ok = sync_transient_binding( + Route, fun mnesia:write/3), + rabbit_misc:dict_cons(Src, B, Acc); + false -> Acc + end + end, dict:new(), + mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])). + should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> case mnesia:read({rabbit_route, B}) of @@ -131,6 +128,13 @@ should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, _ -> false end. +callback_bindings(Tx, XBs) -> + dict:map(fun (XName, Bindings) -> + {ok, X} = rabbit_exchange:lookup(XName), + rabbit_exchange:callback(X, add_bindings, + [Tx, X, Bindings]) + end, XBs). + exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From 8745389ddb413ed3a326f2c4a989f7ad2e7105ce Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Apr 2011 16:57:44 +0100 Subject: UTF8strings come back as binaries. --- src/rabbit_ssl.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl index a3cd2b37..e0defa9e 100644 --- a/src/rabbit_ssl.erl +++ b/src/rabbit_ssl.erl @@ -211,7 +211,8 @@ format_asn1_value(V) -> %% subset of ASCII it is also a subset of UTF-8. The others need %% converting. Fortunately since the Erlang SSL library does the %% decoding for us (albeit into a weird format, see below), we just -%% need to handle encoding into UTF-8. +%% need to handle encoding into UTF-8. Note also that utf8Strings come +%% back as binary. %% %% Note for testing: the default Ubuntu configuration for openssl will %% only create printableString or teletexString types no matter what @@ -225,7 +226,7 @@ format_directory_string(printableString, S) -> S; format_directory_string(teletexString, S) -> utf8_list_from(S); format_directory_string(bmpString, S) -> utf8_list_from(S); format_directory_string(universalString, S) -> utf8_list_from(S); -format_directory_string(utf8String, S) -> S. +format_directory_string(utf8String, S) -> binary_to_list(S). 
utf8_list_from(S) -> binary_to_list( -- cgit v1.2.1 From 54b9d6589ace4ac2f40b8707dc2dfc63d322b8d6 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 6 Apr 2011 09:41:28 +0100 Subject: Added windows service description --- scripts/rabbitmq-service.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index aa428a8c..5a233b0d 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -227,6 +227,7 @@ set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"! -stopaction "rabbit:stop_and_halt()." ^ -sname !RABBITMQ_NODENAME! ^ !CONSOLE_FLAG! ^ +-comment "A performant, robust and scalable implementation of AMQP" ^ -args "!ERLANG_SERVICE_ARGUMENTS!" > NUL goto END -- cgit v1.2.1 From 3b75c9e4a3e1db534569cbe0e21b4061537f5cbc Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 6 Apr 2011 09:51:22 +0100 Subject: Performant means something else entirely --- scripts/rabbitmq-service.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat index 5a233b0d..b2aa4f58 100644 --- a/scripts/rabbitmq-service.bat +++ b/scripts/rabbitmq-service.bat @@ -227,7 +227,7 @@ set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"! -stopaction "rabbit:stop_and_halt()." ^ -sname !RABBITMQ_NODENAME! ^ !CONSOLE_FLAG! ^ --comment "A performant, robust and scalable implementation of AMQP" ^ +-comment "A robust and scalable messaging broker" ^ -args "!ERLANG_SERVICE_ARGUMENTS!" > NUL goto END -- cgit v1.2.1 From 22bdb4ffd079fb5aa842e1673de888be46029ab0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 6 Apr 2011 11:33:14 +0100 Subject: It is not an error if the file does not exist --- src/rabbit_msg_store.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 65688142..3f4162cd 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -1891,7 +1891,10 @@ copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl, force_recovery(BaseDir, Store) -> Dir = filename:join(BaseDir, atom_to_list(Store)), - ok = file:delete(filename:join(Dir, ?CLEAN_FILENAME)), + case file:delete(filename:join(Dir, ?CLEAN_FILENAME)) of + ok -> ok; + {error, enoent} -> ok + end, recover_crashed_compactions(BaseDir), ok. -- cgit v1.2.1 From f2d3b189ad34e01e80841a48552622577cb069f8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:01:46 +0100 Subject: Split back up into lots of little txs. --- src/rabbit_binding.erl | 49 ++++++++++++++++++------------------------------- src/rabbit_exchange.erl | 11 ++++++----- src/rabbit_misc.erl | 12 +++++++----- 3 files changed, 31 insertions(+), 41 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7c492778..c71a21f1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -95,45 +95,32 @@ routing_key, arguments]). recover(XNames, QNames) -> - XBs = rabbit_misc:execute_mnesia_transaction( - fun () -> - XBs = recover_internal(XNames, QNames), - callback_bindings(true, XBs), - XBs - end), - callback_bindings(false, XBs), - ok. 
- -recover_internal(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - lists:foldl( - fun (Route = #route{binding = B = #binding{source = Src}}, Acc) -> + rabbit_misc:table_fold( + fun (Route = #route{binding = B}, _Acc) -> case should_recover(B, XNameSet, QNameSet) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), - rabbit_misc:dict_cons(Src, B, Acc); - false -> Acc + B; + false -> none end - end, dict:new(), - mnesia:select(rabbit_durable_route, [{'$1', [], ['$1']}])). + end, + fun (none, _Tx) -> + ok; + (B = #binding{source = Src}, Tx) -> + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + end, + none, rabbit_durable_route), + ok. -should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, +should_recover(#binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> - case mnesia:read({rabbit_route, B}) of - [] -> sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end); - _ -> false - end. - -callback_bindings(Tx, XBs) -> - dict:map(fun (XName, Bindings) -> - {ok, X} = rabbit_exchange:lookup(XName), - rabbit_exchange:callback(X, add_bindings, - [Tx, X, Bindings]) - end, XBs). + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end). exists(Binding) -> binding_action( diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b39fe32c..bc2d5b29 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -90,11 +90,12 @@ recover() -> [X | Acc]; [_] -> Acc end - end, [], rabbit_durable_exchange), - rabbit_misc:execute_pre_post_mnesia_tx( - fun (Tx) -> - [rabbit_exchange:callback(X, create, [Tx, X]) || X <- Xs] - end), + end, + fun (Acc = [X | _], Tx) -> + rabbit_exchange:callback(X, create, [Tx, X]), + Acc + end, + [], rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 8927020f..28c4596e 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -39,7 +39,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_fold/3]). +-export([table_fold/4]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -148,7 +148,8 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_fold/3 :: (fun ((any(), A) -> A), A, atom()) -> A). +-spec(table_fold/4 :: (fun ((any(), A) -> A), fun ((A, boolean()) -> A), A, + atom()) -> A). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -473,14 +474,15 @@ map_in_order(F, L) -> %% around the lot. %% %% We ignore entries that have been modified or removed. -table_fold(F, Acc0, TableName) -> +table_fold(Fun, PrePostCommitFun, Acc0, TableName) -> lists:foldl( fun (E, Acc) -> execute_mnesia_transaction( fun () -> case mnesia:match_object(TableName, E, read) of [] -> Acc; - _ -> F(E, Acc) + _ -> Fun(E, Acc) end - end) + end, + PrePostCommitFun) end, Acc0, dirty_read_all(TableName)). 
dirty_read_all(TableName) -> -- cgit v1.2.1 From dc5425baa1fbb1d32bbb08ad0d72f9e3788fbdc9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:04:07 +0100 Subject: No longer needed. --- src/rabbit_misc.erl | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 28c4596e..324ec534 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -34,7 +34,6 @@ -export([with_user/2, with_user_and_vhost/3]). -export([execute_mnesia_transaction/1]). -export([execute_mnesia_transaction/2]). --export([execute_pre_post_mnesia_tx/1]). -export([execute_mnesia_tx_with_tail/1]). -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). @@ -136,7 +135,6 @@ -spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A). -spec(execute_mnesia_transaction/2 :: (thunk(A), fun ((A, boolean()) -> B)) -> B). --spec(execute_pre_post_mnesia_tx/1 :: (fun ((boolean()) -> B)) -> B). -spec(execute_mnesia_tx_with_tail/1 :: (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))). -spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok'). @@ -407,11 +405,6 @@ execute_mnesia_transaction(TxFun, PrePostCommitFun) -> Result end), false). -%% Like the above, but without the main body. -execute_pre_post_mnesia_tx(PrePostCommitFun) -> - execute_mnesia_transaction(fun () -> ok end, - fun (ok, Tx) -> PrePostCommitFun(Tx) end). - %% Like execute_mnesia_transaction/2, but TxFun is expected to return a %% TailFun which gets called immediately before and after the tx commit execute_mnesia_tx_with_tail(TxFun) -> -- cgit v1.2.1 From 6f9e91a2d0d38a1bcd680acbb4e2782d975c6a5d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:17:12 +0100 Subject: Check for presence of binding in rabbit_durable_route on add. --- src/rabbit_binding.erl | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c71a21f1..d293c812 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -142,14 +142,11 @@ add(Binding, InnerFun) -> case InnerFun(Src, Dst) of ok -> case mnesia:read({rabbit_route, B}) of - [] -> ok = sync_binding(B, all_durable([Src, Dst]), - fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_bindings, - [Tx, Src, [B]]), - rabbit_event:notify_if( - not Tx, binding_created, info(B)) + [] -> case mnesia:read({rabbit_durable_route, B}) of + [] -> add_internal(Src, Dst, B); + %% Binding exists, to queue on node which + %% is in the middle of starting + [_] -> rabbit_misc:const(not_found) end; [_] -> fun rabbit_misc:const_ok/1 end; @@ -158,6 +155,13 @@ add(Binding, InnerFun) -> end end). +add_internal(Src, Dst, B) -> + ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback(Src, add_bindings, [Tx, Src, [B]]), + rabbit_event:notify_if(not Tx, binding_created, info(B)) + end. + remove(Binding, InnerFun) -> binding_action( Binding, -- cgit v1.2.1 From 4002f7243e0f392d6ceba4f779732f160321179e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:23:04 +0100 Subject: Check the route still exists. --- src/rabbit_binding.erl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index d293c812..7131ab21 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -115,12 +115,15 @@ recover(XNames, QNames) -> none, rabbit_durable_route), ok. 
-should_recover(#binding{destination = Dst = #resource{ kind = Kind }}, +should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, XNameSet, QNameSet) -> - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end). + case mnesia:read({rabbit_durable_route, B}) of + [] -> false; %% It disappeared between getting the list and here + [_] -> sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) + end. exists(Binding) -> binding_action( -- cgit v1.2.1 From a5b4048d515d9e2b9fad6ed45785f1bfb7c60d1c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:36:58 +0100 Subject: Refactor a bit, and only check rabbit_durable_route if the binding might be durable. --- src/rabbit_binding.erl | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7131ab21..2ae7c973 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -143,26 +143,30 @@ add(Binding, InnerFun) -> %% in general, we want to fail on that in preference to %% anything else case InnerFun(Src, Dst) of - ok -> - case mnesia:read({rabbit_route, B}) of - [] -> case mnesia:read({rabbit_durable_route, B}) of - [] -> add_internal(Src, Dst, B); - %% Binding exists, to queue on node which - %% is in the middle of starting - [_] -> rabbit_misc:const(not_found) - end; - [_] -> fun rabbit_misc:const_ok/1 - end; - {error, _} = Err -> - rabbit_misc:const(Err) + ok -> add(Src, Dst, B); + {error, _} = Err -> rabbit_misc:const(Err) end end). -add_internal(Src, Dst, B) -> - ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback(Src, add_bindings, [Tx, Src, [B]]), - rabbit_event:notify_if(not Tx, binding_created, info(B)) +add(Src, Dst, B) -> + case mnesia:read({rabbit_route, B}) of + [] -> Durable = all_durable([Src, Dst]), + case (not Durable orelse + mnesia:read({rabbit_durable_route, B}) =:= []) of + true -> + ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback(Src, add_bindings, + [Tx, Src, [B]]), + rabbit_event:notify_if(not Tx, binding_created, + info(B)) + end; + %% Binding exists, to queue on node which + %% is in the middle of starting + false -> + rabbit_misc:const(not_found) + end; + [_] -> fun rabbit_misc:const_ok/1 end. remove(Binding, InnerFun) -> -- cgit v1.2.1 From 5689000c5a62bcb866e352f6f5497a38350b1ffc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:44:56 +0100 Subject: Matthias prefers this. 
--- src/rabbit_binding.erl | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 2ae7c973..e02427bc 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -98,8 +98,16 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), rabbit_misc:table_fold( - fun (Route = #route{binding = B}, _Acc) -> - case should_recover(B, XNameSet, QNameSet) of + fun (Route = #route{binding = B = + #binding{destination = Dst = + #resource{kind = Kind}}}, _Acc) -> + %% The check against rabbit_durable_route is in case it + %% disappeared between getting the list and here + case (not mnesia:read({rabbit_durable_route, B}) =:= [] andalso + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end)) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), B; @@ -115,16 +123,6 @@ recover(XNames, QNames) -> none, rabbit_durable_route), ok. -should_recover(B = #binding{destination = Dst = #resource{ kind = Kind }}, - XNameSet, QNameSet) -> - case mnesia:read({rabbit_durable_route, B}) of - [] -> false; %% It disappeared between getting the list and here - [_] -> sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) - end. - exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From d5248d1d98a27a5ee030157efee4cffa623e3388 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:48:04 +0100 Subject: Acc may be empty. --- src/rabbit_exchange.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index bc2d5b29..a2684782 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -91,7 +91,9 @@ recover() -> [_] -> Acc end end, - fun (Acc = [X | _], Tx) -> + fun ([], _Tx) -> + []; + (Acc = [X | _], Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), Acc end, -- cgit v1.2.1 From 3aa8f702f49d253526b9571cc2a63bca0a0ee516 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 15:59:54 +0100 Subject: Damn priorities. 
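In Erlang the unary `not` operator binds more tightly than the comparison operators, so the
expression introduced in the previous commit parsed as `(not mnesia:read(...)) =:= []`, and
applying `not` to a list is a badarg at runtime. The added parentheses restore the intended
reading. A rough illustration (not from the patch):

    %% Intended meaning: "the durable route is still present".
    not (mnesia:read({rabbit_durable_route, B}) =:= [])
    %% Without the parentheses this parses as
    %%   (not mnesia:read({rabbit_durable_route, B})) =:= []
    %% and crashes, because `not` only accepts booleans.
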
--- src/rabbit_binding.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index e02427bc..b7bebc39 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -103,7 +103,7 @@ recover(XNames, QNames) -> #resource{kind = Kind}}}, _Acc) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case (not mnesia:read({rabbit_durable_route, B}) =:= [] andalso + case (not (mnesia:read({rabbit_durable_route, B}) =:= []) andalso sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet -- cgit v1.2.1 From 03677a7ac3c173a83f5e29123d7cafde96cb4ef6 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Apr 2011 17:08:54 +0100 Subject: table_fold -> table_map --- src/rabbit_binding.erl | 21 +++++++++++---------- src/rabbit_exchange.erl | 18 ++++++++---------- src/rabbit_misc.erl | 27 +++++++++++++++------------ 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b7bebc39..b2d84143 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,17 +97,17 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - rabbit_misc:table_fold( + rabbit_misc:table_map( fun (Route = #route{binding = B = #binding{destination = Dst = - #resource{kind = Kind}}}, _Acc) -> + #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case (not (mnesia:read({rabbit_durable_route, B}) =:= []) andalso - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end)) of + case mnesia:read({rabbit_durable_route, B}) =/= [] andalso + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) of true -> ok = sync_transient_binding( Route, fun mnesia:write/3), B; @@ -115,12 +115,13 @@ recover(XNames, QNames) -> end end, fun (none, _Tx) -> - ok; + none; (B = #binding{source = Src}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), + B end, - none, rabbit_durable_route), + rabbit_durable_route), ok. exists(Binding) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a2684782..2fe98e4b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,21 +83,19 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_fold( - fun (X = #exchange{name = XName}, Acc) -> + Xs = rabbit_misc:table_map( + fun (X = #exchange{name = XName}) -> case mnesia:read({rabbit_exchange, XName}) of [] -> ok = mnesia:write(rabbit_exchange, X, write), - [X | Acc]; - [_] -> Acc + X; + [_] -> none end end, - fun ([], _Tx) -> - []; - (Acc = [X | _], Tx) -> - rabbit_exchange:callback(X, create, [Tx, X]), - Acc + fun (none, _Tx) -> none; + (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), + X end, - [], rabbit_durable_exchange), + rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. callback(#exchange{type = XType}, Fun, Args) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 324ec534..6bebf005 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -38,7 +38,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_fold/4]). +-export([table_map/3]). 
-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -146,8 +146,7 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_fold/4 :: (fun ((any(), A) -> A), fun ((A, boolean()) -> A), A, - atom()) -> A). +-spec(table_map/3 :: (fun ((A) -> A), fun ((A, boolean()) -> A), atom()) -> A). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -467,16 +466,20 @@ map_in_order(F, L) -> %% around the lot. %% %% We ignore entries that have been modified or removed. -table_fold(Fun, PrePostCommitFun, Acc0, TableName) -> +table_map(Fun, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, read) of - [] -> Acc; - _ -> Fun(E, Acc) - end - end, - PrePostCommitFun) - end, Acc0, dirty_read_all(TableName)). + fun (E, Acc) -> case execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> Acc; + _ -> Fun(E) + end + end, + PrePostCommitFun) of + none -> Acc; + Res -> [Res | Acc] + end + end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> mnesia:dirty_select(TableName, [{'$1',[],['$1']}]). -- cgit v1.2.1 From 7452bf152ed2030df9071b6ae9a5ffacdc5654fb Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 7 Apr 2011 11:22:25 +0100 Subject: tweak --- src/rabbit_reader.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 6763bdb9..79210268 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -213,7 +213,7 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, rabbit_event:init_stats_timer(), channel_sup_sup_pid = ChannelSupSupPid, start_heartbeat_fun = StartHeartbeatFun, - buf = [<<>>], + buf = [], auth_mechanism = none, auth_state = none }, -- cgit v1.2.1 From ac8ee674af3f80b979031831fd2b43a0aab9e427 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 11:24:35 +0100 Subject: Tested debitrot; works again. --- src/rabbit_log.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 075a5243..25debc42 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -91,8 +91,9 @@ tap_trace_in(Message = #basic_message{exchange_name = #resource{ fun (TraceExchangeBin) -> QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || P <- QPids], - QNames = [N || [{name, #resource{name = N}}] <- QInfos], - QNamesStr = list_to_binary(rabbit_misc:intersperse(",", QNames)), + QNames = [binary_to_list(N) || + [{name, #resource{name = N}}] <- QInfos], + QNamesStr = list_to_binary(string:join(QNames, ",")), EncodedMessage = message_to_table(Message), maybe_inject(TraceExchangeBin, VHostBin, XNameBin, <<"publish">>, XNameBin, @@ -141,12 +142,11 @@ maybe_inject(TraceExchangeBin, VHostBin, OriginalExchangeBin, TraceExchangeBin =:= OriginalExchangeBin -> ok; true -> - rabbit_exchange:simple_publish( - false, - false, + ContentTypeBin = <<"application/x-amqp-table; version=0-9-1">>, + rabbit_basic:publish( rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), <>, - <<"application/x-amqp-table; version=0-8">>, + #'P_basic'{content_type = ContentTypeBin}, rabbit_binary_generator:generate_table(Table)), ok end. 
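%% Aside (not part of the patch): the queue_names header above is built by joining
%% ordinary strings and converting the result back to a binary, which is why the queue
%% names are first turned into lists with binary_to_list/1, e.g.
%%
%%     list_to_binary(string:join(["logs", "audit"], ","))
%%
%% evaluates to <<"logs,audit">> ("logs" and "audit" are made-up queue names).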
@@ -169,7 +169,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, app_id = AppId}, payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), - Headers = prune_undefined( + Headers1 = prune_undefined( [{<<"content_type">>, longstr, ContentType}, {<<"content_encoding">>, longstr, ContentEncoding}, {<<"headers">>, table, Headers}, @@ -185,7 +185,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, {<<"app_id">>, longstr, AppId}]), [{<<"exchange_name">>, longstr, XName}, {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"headers">>, table, Headers}, + {<<"headers">>, table, Headers1}, {<<"body">>, longstr, list_to_binary(lists:reverse(PFR))}]. prune_undefined(Fields) -> -- cgit v1.2.1 From 7de37afd01a5aa913a7bced14d1e0e00432793e3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 7 Apr 2011 12:18:32 +0100 Subject: Validate_msg becomes is_duplicate, which makes much more sense --- include/rabbit_backing_queue_spec.hrl | 4 ++-- src/rabbit_amqqueue_process.erl | 21 +++++++++++++-------- src/rabbit_backing_queue.erl | 6 ++++-- src/rabbit_mirror_queue_master.erl | 24 ++++++++++++------------ src/rabbit_variable_queue.erl | 4 ++-- 5 files changed, 33 insertions(+), 26 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index b85e4ad6..f5e441dc 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -71,5 +71,5 @@ -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). --spec(validate_message/2 :: (rabbit_types:basic_message(), state()) -> - {'invalid' | 'valid', state()}). +-spec(is_duplicate/2 :: (rabbit_types:basic_message(), state()) -> + {boolean(), state()}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 9e54312f..575d69f4 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -523,12 +523,17 @@ attempt_delivery(Delivery = #delivery{txn = none, immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, - case BQ:validate_message(Message, BQS) of - {invalid, BQS1} -> - %% if the message is invalid, we pretend it was delivered - %% fine + case BQ:is_duplicate(Message, BQS) of + {true, BQS1} -> + %% if the message has previously been seen by the BQ then + %% it must have been seen under the same circumstances as + %% now: i.e. if it is now a deliver_immediately then it + %% must have been before. Consequently, if the BQ has seen + %% it before then it's safe to assume it's been delivered + %% (i.e. the only thing that cares about that is + %% deliver_immediately). 
{true, Confirm, State#q{backing_queue_state = BQS1}}; - {valid, BQS1} -> + {false, BQS1} -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = fun (AckRequired, false, @@ -555,10 +560,10 @@ attempt_delivery(Delivery = #delivery{txn = Txn, message = Message}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> Confirm = should_confirm_message(Delivery, State), - case BQ:validate_message(Message, BQS) of - {invalid, BQS1} -> + case BQ:is_duplicate(Message, BQS) of + {true, BQS1} -> {true, Confirm, State#q{backing_queue_state = BQS1}}; - {valid, BQS1} -> + {false, BQS1} -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, BQS1), diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 7087be91..dfa5500e 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -171,8 +171,10 @@ behaviour_info(callbacks) -> %% components need to pass functions into the backing queue. {invoke, 3}, - %% TODO: document me - {validate_message, 2} + %% Called prior to a publish or publish_delivered call. Allows + %% the BQ to signal that it's already seen this message and thus + %% the message should be dropped. + {is_duplicate, 2} ]; behaviour_info(_Other) -> undefined. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 0ca73f03..42af4e51 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, validate_message/2]). + status/1, invoke/3, is_duplicate/2]). -export([start/1, stop/0]). @@ -274,11 +274,11 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. -validate_message(Message = #basic_message { id = MsgId }, - State = #state { seen_status = SS, - backing_queue = BQ, - backing_queue_state = BQS, - confirmed = Confirmed }) -> +is_duplicate(Message = #basic_message { id = MsgId }, + State = #state { seen_status = SS, + backing_queue = BQ, + backing_queue_state = BQS, + confirmed = Confirmed }) -> %% Here, we need to deal with the possibility that we're about to %% receive a message that we've already seen when we were a slave %% (we received it via gm). Thus if we do receive such message now @@ -297,10 +297,10 @@ validate_message(Message = #basic_message { id = MsgId }, %% confirmation is waiting. amqqueue_process will have, in %% its msg_id_to_channel mapping, the entry for dealing %% with the confirm when that comes back in (it's added - %% immediately prior to calling validate_message). The msg - %% is invalid. We will not see this again, nor will we be + %% immediately after calling is_duplicate). The msg is + %% invalid. We will not see this again, nor will we be %% further involved in confirming this message, so erase. - {invalid, State #state { seen_status = dict:erase(MsgId, SS) }}; + {true, State #state { seen_status = dict:erase(MsgId, SS) }}; {ok, confirmed} -> %% It got published when we were a slave via gm, and %% confirmed some time after that (maybe even after @@ -309,7 +309,7 @@ validate_message(Message = #basic_message { id = MsgId }, %% msg_seq_no was (and thus confirm as a slave). So we %% need to confirm now. 
As above, amqqueue_process will %% have the entry for the msg_id_to_channel mapping added - %% immediately prior to calling validate_message/2. - {invalid, State #state { seen_status = dict:erase(MsgId, SS), - confirmed = [MsgId | Confirmed] }} + %% immediately after calling is_duplicate/2. + {true, State #state { seen_status = dict:erase(MsgId, SS), + confirmed = [MsgId | Confirmed] }} end. diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 486d30fd..a8f9974a 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, validate_message/2, multiple_routing_keys/0]). + status/1, invoke/3, is_duplicate/2, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -886,7 +886,7 @@ status(#vqstate { invoke(?MODULE, Fun, State) -> Fun(?MODULE, State). -validate_message(_Msg, State) -> {valid, State}. +is_duplicate(_Msg, State) -> {false, State}. %%---------------------------------------------------------------------------- %% Minor helpers -- cgit v1.2.1 From 2a9b978718c8fac6941f79cb2bf13635d48f44fa Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 12:40:31 +0100 Subject: Move the inbound tap to before routing but after validation. Remove queues from inbound tap. --- src/rabbit_channel.erl | 3 +-- src/rabbit_log.erl | 13 +++---------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index d8a7e3d6..e3dc47dc 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -604,6 +604,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> + rabbit_log:tap_trace_in(Message), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -612,8 +613,6 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, State2 = process_routing_result(RoutingRes, DeliveredQPids, ExchangeName, MsgSeqNo, Message, State1), - %% TODO is this in the right place? - rabbit_log:tap_trace_in(Message, DeliveredQPids), maybe_incr_stats([{ExchangeName, 1} | [{{QPid, ExchangeName}, 1} || QPid <- DeliveredQPids]], publish, State2), diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 25debc42..18460bdb 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -26,7 +26,7 @@ -export([debug/1, debug/2, message/4, info/1, info/2, warning/1, warning/2, error/1, error/2]). --export([tap_trace_in/2, tap_trace_out/3]). +-export([tap_trace_in/1, tap_trace_out/3]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). @@ -84,21 +84,14 @@ error(Fmt, Args) when is_list(Args) -> tap_trace_in(Message = #basic_message{exchange_name = #resource{ virtual_host = VHostBin, - name = XNameBin}}, - QPids) -> + name = XNameBin}}) -> check_trace( VHostBin, fun (TraceExchangeBin) -> - QInfos = [rabbit_amqqueue:info(#amqqueue{pid = P}, [name]) || - P <- QPids], - QNames = [binary_to_list(N) || - [{name, #resource{name = N}}] <- QInfos], - QNamesStr = list_to_binary(string:join(QNames, ",")), EncodedMessage = message_to_table(Message), maybe_inject(TraceExchangeBin, VHostBin, XNameBin, <<"publish">>, XNameBin, - [{<<"queue_names">>, longstr, QNamesStr}, - {<<"message">>, table, EncodedMessage}]) + [{<<"message">>, table, EncodedMessage}]) end). 
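%% Aside (not part of the patch): with this scheme trace messages reach the configured
%% trace exchange with routing keys built from the "publish"/"deliver" prefix and the
%% exchange/queue name, i.e. "publish.<exchange>" for the inbound tap and
%% "deliver.<queue>" for the outbound tap. Assuming the trace exchange is declared as a
%% topic exchange (which the patch does not require), a client could capture just the
%% inbound side with a binding such as
%%
%%     #'queue.bind'{queue       = <<"firehose">>,
%%                   exchange    = TraceExchangeNameBin,
%%                   routing_key = <<"publish.#">>}
%%
%% where the queue name "firehose" and the variable are illustrative only.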
tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, -- cgit v1.2.1 From 93b89d2d074d794329c2b86ec6732b18beeac866 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 12:58:27 +0100 Subject: Make the generated messages somewhat more AMQP-ish by mapping payload to payload and envelope to headers. Thus lose our new mimetype. --- src/rabbit_log.erl | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 18460bdb..9dbf906a 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -88,10 +88,9 @@ tap_trace_in(Message = #basic_message{exchange_name = #resource{ check_trace( VHostBin, fun (TraceExchangeBin) -> - EncodedMessage = message_to_table(Message), + {EncodedMetadata, Payload} = message_to_table(Message), maybe_inject(TraceExchangeBin, VHostBin, XNameBin, - <<"publish">>, XNameBin, - [{<<"message">>, table, EncodedMessage}]) + <<"publish">>, XNameBin, EncodedMetadata, Payload) end). tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, @@ -104,11 +103,11 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, VHostBin, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - EncodedMessage = message_to_table(Message), + {EncodedMetadata, Payload} = message_to_table(Message), Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later {<<"queue_msg_number">>, signedint, QMsgId}, - {<<"redelivered">>, signedint, RedeliveredNum}, - {<<"message">>, table, EncodedMessage}], + {<<"redelivered">>, signedint, RedeliveredNum}] + ++ EncodedMetadata, Fields = case ConsumerTagOrNone of none -> Fields0; @@ -117,7 +116,7 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, | Fields0] end, maybe_inject(TraceExchangeBin, VHostBin, XNameBin, - <<"deliver">>, QNameBin, Fields) + <<"deliver">>, QNameBin, Fields, Payload) end). check_trace(VHostBin, F) -> @@ -130,17 +129,16 @@ check_trace(VHostBin, F) -> end. maybe_inject(TraceExchangeBin, VHostBin, OriginalExchangeBin, - RKPrefix, RKSuffix, Table) -> + RKPrefix, RKSuffix, Table, Payload) -> if TraceExchangeBin =:= OriginalExchangeBin -> ok; true -> - ContentTypeBin = <<"application/x-amqp-table; version=0-9-1">>, rabbit_basic:publish( rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), <>, - #'P_basic'{content_type = ContentTypeBin}, - rabbit_binary_generator:generate_table(Table)), + #'P_basic'{headers = Table}, + Payload), ok end. @@ -176,10 +174,10 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, {<<"type">>, longstr, Type}, {<<"user_id">>, longstr, UserId}, {<<"app_id">>, longstr, AppId}]), - [{<<"exchange_name">>, longstr, XName}, - {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"headers">>, table, Headers1}, - {<<"body">>, longstr, list_to_binary(lists:reverse(PFR))}]. + {[{<<"exchange_name">>, longstr, XName}, + {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, + {<<"headers">>, table, Headers1}], + list_to_binary(lists:reverse(PFR))}. prune_undefined(Fields) -> [F || F = {_, _, Value} <- Fields, -- cgit v1.2.1 From 93674ad1716b7c5dd07bcbc255599d4e3fd12828 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 7 Apr 2011 13:01:17 +0100 Subject: get heartbeat monitor to pause when it should On 'default' we kick off another prim_inet:async_recv *before* handling the frame we've just received. 
This is done for performance reasons - essentially we are reading ahead - and leads to the following sequence of events: 1. receive memory alarm -> change state to 'blocking' 2. receive a 'publish' method frame 3. kick off another prim_inet:async_recv 4. handle frame, detecting that it is a 'publish' frame and thus changing the state to 'blocked' 5. receive the frame header for another frame (e.g. the message header, or could be something on another channel, or a heartbeat) 6. since the state is 'blocked' and we pause the heartbeat monitor and *don't* kick off another prim_inet:async_recv On this branch we don't read ahead since a) that would complicate the logic a fair bit, and b) we could end up draining a fair chunk of data from the socket, rather than just a frame header. As a result we need to make sure the heartbeat monitor gets paused as soon as we transition to the 'blocked' state. --- src/rabbit_reader.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 79210268..e210dba1 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -521,8 +521,8 @@ handle_input({frame_payload, Type, Channel, PayloadSize}, PayloadAndMarker, State) -> case PayloadAndMarker of <> -> - handle_frame(Type, Channel, Payload, - switch_callback(State, frame_header, 7)); + switch_callback(handle_frame(Type, Channel, Payload, State), + frame_header, 7); _ -> throw({bad_payload, Type, Channel, PayloadSize, PayloadAndMarker}) end; -- cgit v1.2.1 From 89e79a7b117de45df0d013b56a70d151991995c2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 13:01:43 +0100 Subject: Don't expose queue message ID. --- src/rabbit_log.erl | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 9dbf906a..aa2e990c 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -93,7 +93,7 @@ tap_trace_in(Message = #basic_message{exchange_name = #resource{ <<"publish">>, XNameBin, EncodedMetadata, Payload) end). -tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, +tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, Message = #basic_message{exchange_name = #resource{ virtual_host = VHostBin, name = XNameBin}}}, @@ -104,9 +104,8 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, QMsgId, Redelivered, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, {EncodedMetadata, Payload} = message_to_table(Message), - Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later - {<<"queue_msg_number">>, signedint, QMsgId}, - {<<"redelivered">>, signedint, RedeliveredNum}] + Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later + {<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, Fields = case ConsumerTagOrNone of none -> -- cgit v1.2.1 From 737f6b01d1de1e2d8b2e2e7b6120af674165e9f9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 13:12:48 +0100 Subject: In a cluster it may be useful to know which node things happened on. 
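With this change the metadata table of every trace message also carries a "node" entry
identifying the broker node that emitted it. Purely as an illustration (not part of the
patch), a consumer using a client that exposes AMQP header tables as {Name, Type, Value}
tuples could pick the value out like this:

    %% Headers: the decoded header table of a received trace message.
    %% Returns the originating node name as a string, or 'unknown'.
    node_of_trace(Headers) ->
        case lists:keyfind(<<"node">>, 1, Headers) of
            {<<"node">>, longstr, NodeBin} -> binary_to_list(NodeBin);
            false                          -> unknown
        end.
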
--- src/rabbit_log.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index aa2e990c..ba3bd234 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -175,7 +175,8 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, {<<"app_id">>, longstr, AppId}]), {[{<<"exchange_name">>, longstr, XName}, {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"headers">>, table, Headers1}], + {<<"headers">>, table, Headers1}, + {<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], list_to_binary(lists:reverse(PFR))}. prune_undefined(Fields) -> -- cgit v1.2.1 From 176769e7112ab3b311ab0cec60d944ca267709f3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 7 Apr 2011 13:29:28 +0100 Subject: Add BQ:discard, correct BQ:is_duplicate, finally fix the last bits of immediate delivery, though hopefully in a way which has not leaked through to the lower layers... --- include/rabbit_backing_queue_spec.hrl | 3 +- src/rabbit_amqqueue_process.erl | 39 ++++++++++++-------- src/rabbit_backing_queue.erl | 13 +++++-- src/rabbit_mirror_queue_master.erl | 18 +++++++--- src/rabbit_mirror_queue_slave.erl | 68 +++++++++++++++++++++++++++++------ src/rabbit_variable_queue.erl | 5 ++- 6 files changed, 112 insertions(+), 34 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index f5e441dc..b0c5f13b 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -72,4 +72,5 @@ -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). -spec(is_duplicate/2 :: (rabbit_types:basic_message(), state()) -> - {boolean(), state()}). + {'false'|'published'|'discarded', state()}). +-spec(discard/3 :: (rabbit_types:basic_message(), pid(), state()) -> state()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 575d69f4..79f6472d 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -524,15 +524,6 @@ attempt_delivery(Delivery = #delivery{txn = none, _ -> ok end, case BQ:is_duplicate(Message, BQS) of - {true, BQS1} -> - %% if the message has previously been seen by the BQ then - %% it must have been seen under the same circumstances as - %% now: i.e. if it is now a deliver_immediately then it - %% must have been before. Consequently, if the BQ has seen - %% it before then it's safe to assume it's been delivered - %% (i.e. the only thing that cares about that is - %% deliver_immediately). - {true, Confirm, State#q{backing_queue_state = BQS1}}; {false, BQS1} -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -553,7 +544,17 @@ attempt_delivery(Delivery = #delivery{txn = none, {Delivered, State2} = deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State#q{backing_queue_state = BQS1}), - {Delivered, Confirm, State2} + {Delivered, Confirm, State2}; + {Duplicate, BQS1} -> + %% if the message has previously been seen by the BQ then + %% it must have been seen under the same circumstances as + %% now: i.e. if it is now a deliver_immediately then it + %% must have been before. 
+ Delivered = case Duplicate of + published -> true; + discarded -> false + end, + {Delivered, Confirm, State#q{backing_queue_state = BQS1}} end; attempt_delivery(Delivery = #delivery{txn = Txn, sender = ChPid, @@ -561,13 +562,17 @@ attempt_delivery(Delivery = #delivery{txn = Txn, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> Confirm = should_confirm_message(Delivery, State), case BQ:is_duplicate(Message, BQS) of - {true, BQS1} -> - {true, Confirm, State#q{backing_queue_state = BQS1}}; {false, BQS1} -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, BQS1), - {true, Confirm, State#q{backing_queue_state = BQS2}} + {true, Confirm, State#q{backing_queue_state = BQS2}}; + {Duplicate, BQS1} -> + Delivered = case Duplicate of + published -> true; + discarded -> false + end, + {Delivered, Confirm, State#q{backing_queue_state = BQS1}} end. deliver_or_enqueue(Delivery = #delivery{message = Message}, State) -> @@ -721,6 +726,12 @@ rollback_transaction(Txn, C, State = #q{backing_queue = BQ, subtract_acks(A, B) when is_list(B) -> lists:foldl(fun sets:del_element/2, A, B). +discard_delivery(#delivery{sender = ChPid, + message = Message}, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + State#q{backing_queue_state = BQ:discard(Message, ChPid, BQS)}. + reset_msg_expiry_fun(TTL) -> fun(MsgProps) -> MsgProps#message_properties{expiry = calculate_msg_expiry(TTL)} @@ -910,7 +921,7 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), reply(Delivered, case Delivered of true -> maybe_record_confirm_message(Confirm, State1); - false -> State1 + false -> discard_delivery(Delivery, State1) end); handle_call({deliver, Delivery}, From, State) -> diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index dfa5500e..0bbbd559 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -172,9 +172,16 @@ behaviour_info(callbacks) -> {invoke, 3}, %% Called prior to a publish or publish_delivered call. Allows - %% the BQ to signal that it's already seen this message and thus - %% the message should be dropped. - {is_duplicate, 2} + %% the BQ to signal that it's already seen this message (and in + %% what capacity - i.e. was it published previously or discarded + %% previously) and thus the message should be dropped. + {is_duplicate, 2}, + + %% Called to inform the BQ about messages which have reached the + %% queue, but are not going to be further passed to BQ for some + %% reason. Note that this is not invoked for messages for which + %% BQ:is_duplicate/2 has already returned {true, BQS}. + {discard, 3} ]; behaviour_info(_Other) -> undefined. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 42af4e51..b0a22edd 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/2]). + status/1, invoke/3, is_duplicate/2, discard/3]). -export([start/1, stop/0]). 
@@ -150,6 +150,7 @@ drain_confirmed(State = #state { backing_queue = BQ, {MsgIds1, SS1} = lists:foldl( fun (MsgId, {MsgIdsN, SSN}) -> + %% We will never see 'discarded' here case dict:find(MsgId, SSN) of error -> {[MsgId | MsgIdsN], SSN}; @@ -300,7 +301,7 @@ is_duplicate(Message = #basic_message { id = MsgId }, %% immediately after calling is_duplicate). The msg is %% invalid. We will not see this again, nor will we be %% further involved in confirming this message, so erase. - {true, State #state { seen_status = dict:erase(MsgId, SS) }}; + {published, State #state { seen_status = dict:erase(MsgId, SS) }}; {ok, confirmed} -> %% It got published when we were a slave via gm, and %% confirmed some time after that (maybe even after @@ -310,6 +311,15 @@ is_duplicate(Message = #basic_message { id = MsgId }, %% need to confirm now. As above, amqqueue_process will %% have the entry for the msg_id_to_channel mapping added %% immediately after calling is_duplicate/2. - {true, State #state { seen_status = dict:erase(MsgId, SS), - confirmed = [MsgId | Confirmed] }} + {published, State #state { seen_status = dict:erase(MsgId, SS), + confirmed = [MsgId | Confirmed] }}; + {ok, discarded} -> + {discarded, State #state { seen_status = dict:erase(MsgId, SS) }} end. + +discard(Msg = #basic_message {}, ChPid, + State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {discard, ChPid, Msg}), + State#state{backing_queue_state = BQ:discard(Msg, ChPid, BQS)}. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 4a9dc1fe..628135b1 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -313,6 +313,7 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> {MS1, CMs} = lists:foldl( fun (MsgId, {MSN, CMsN} = Acc) -> + %% We will never see 'discarded' here case dict:find(MsgId, MSN) of error -> %% If it needed confirming, it'll have @@ -395,21 +396,25 @@ promote_me(From, #state { q = Q, %% %% MS contains the following three entry types: %% - %% {published, ChPid}: + %% a) {published, ChPid}: %% published via gm only; pending arrival of publication from %% channel, maybe pending confirm. %% - %% {published, ChPid, MsgSeqNo}: + %% b) {published, ChPid, MsgSeqNo}: %% published via gm and channel; pending confirm. %% - %% {confirmed, ChPid}: + %% c) {confirmed, ChPid}: %% published via gm only, and confirmed; pending publication %% from channel. %% - %% The two outer forms only, need to go to the master state + %% d) discarded + %% seen via gm only as discarded. Pending publication from + %% channel + %% + %% The forms a, c and d only, need to go to the master state %% seen_status (SS). %% - %% The middle form only, needs to go through to the queue_process + %% The form b only, needs to go through to the queue_process %% state to form the msg_id_to_channel mapping (MTC). %% %% No messages that are enqueued from SQ at this point will have @@ -420,9 +425,12 @@ promote_me(From, #state { q = Q, %% this does not affect MS, nor which bits go through to SS in %% Master, or MTC in queue_process. 
- SS = dict:from_list([{MsgId, Status} - || {MsgId, {Status, _ChPid}} <- dict:to_list(MS), - Status =:= published orelse Status =:= confirmed]), + MSList = dict:to_list(MS), + SS = dict:from_list( + [E || E = {_MsgId, discarded} <- MSList] ++ + [{MsgId, Status} + || {MsgId, {Status, _ChPid}} <- MSList, + Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( CPid, BQ, BQS, GM, SS), @@ -528,7 +536,11 @@ maybe_enqueue_message( immediately -> ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), State #state { msg_id_status = dict:erase(MsgId, MS) } - end + end; + {ok, discarded} -> + %% We've already heard from GM that the msg is to be + %% discarded. We won't see this again. + State #state { msg_id_status = dict:erase(MsgId, MS) } end. process_instruction( @@ -559,8 +571,7 @@ process_instruction( {{value, {Delivery = #delivery { msg_seq_no = MsgSeqNo, message = #basic_message { id = MsgId } }, - _EnqueueOnPromotion}}, - MQ1} -> + _EnqueueOnPromotion}}, MQ1} -> %% We received the msg from the channel %% first. Thus we need to deal with confirms %% here. @@ -604,6 +615,41 @@ process_instruction( State1 #state { backing_queue_state = BQS1, msg_id_ack = MA1 } end}; +process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, + State = #state { sender_queues = SQ, + backing_queue = BQ, + backing_queue_state = BQS, + msg_id_status = MS }) -> + %% Many of the comments around the publish head above apply here + %% too. + MS1 = dict:store(MsgId, discarded, MS), + {SQ1, MS2} = + case dict:find(ChPid, SQ) of + error -> + {SQ, MS1}; + {ok, MQ} -> + case queue:out(MQ) of + {empty, _MQ} -> + {SQ, MS1}; + {{value, {#delivery { + message = #basic_message { id = MsgId } }, + _EnqueueOnPromotion}}, MQ1} -> + %% We've already seen it from the channel, + %% we're not going to see this again, so don't + %% add it to MS + {dict:store(ChPid, MQ1, SQ), MS}; + {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ1} -> + %% The instruction was sent to us before we + %% were within the mirror_pids within the + %% #amqqueue{} record. We'll never receive the + %% message directly from the channel. + {SQ, MS} + end + end, + BQS1 = BQ:discard(Msg, ChPid, BQS), + {ok, State #state { sender_queues = SQ1, + msg_id_status = MS2, + backing_queue_state = BQS1 }}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index a8f9974a..84987c88 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,8 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/2, multiple_routing_keys/0]). + status/1, invoke/3, is_duplicate/2, discard/3, + multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -888,6 +889,8 @@ invoke(?MODULE, Fun, State) -> is_duplicate(_Msg, State) -> {false, State}. +discard(_Msg, _ChPid, State) -> State. 
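%% Aside (not part of the patch): the default backing queue never treats a message as a
%% duplicate, so both callbacks above are no-ops. To illustrate the
%% {false | published | discarded, State} contract, a toy backing queue module that
%% deduplicates by message id might look like the sketch below; the #dedup{} record and
%% its 'seen' set are hypothetical and do not exist in this codebase.

is_duplicate(#basic_message{id = MsgId}, State = #dedup{seen = Seen}) ->
    case sets:is_element(MsgId, Seen) of
        true  -> {discarded, State};
        false -> {false, State}
    end.

discard(#basic_message{id = MsgId}, _ChPid, State = #dedup{seen = Seen}) ->
    State#dedup{seen = sets:add_element(MsgId, Seen)}.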
+ %%---------------------------------------------------------------------------- %% Minor helpers %%---------------------------------------------------------------------------- -- cgit v1.2.1 From a5fccc82365e8bd9b39dbc0202d14772d5b2d7aa Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 7 Apr 2011 14:30:47 +0100 Subject: make ssl work ...and handle socket errors It turns out that for active sockets the messages sent by tcp and ssl sockets to the controlling process differ gratuitously. --- src/rabbit_net.erl | 22 +++++++- src/rabbit_reader.erl | 146 +++++++++++++++++++++++++------------------------- 2 files changed, 93 insertions(+), 75 deletions(-) diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl index c8514d90..b6cc28af 100644 --- a/src/rabbit_net.erl +++ b/src/rabbit_net.erl @@ -18,7 +18,7 @@ -include("rabbit.hrl"). -export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2, - async_recv/3, port_command/2, setopts/2, send/2, close/1, + recv/1, async_recv/3, port_command/2, setopts/2, send/2, close/1, sockname/1, peername/1, peercert/1]). %%--------------------------------------------------------------------------- @@ -42,6 +42,9 @@ -spec(getstat/2 :: (socket(), [stat_option()]) -> ok_val_or_error([{stat_option(), integer()}])). +-spec(recv/1 :: (socket()) -> + {'data', [char()] | binary()} | 'closed' | + rabbit_types:error(any()) | {'other', any()}). -spec(async_recv/3 :: (socket(), integer(), timeout()) -> rabbit_types:ok(any())). -spec(port_command/2 :: (socket(), iolist()) -> 'true'). @@ -83,6 +86,23 @@ getstat(Sock, Stats) when ?IS_SSL(Sock) -> getstat(Sock, Stats) when is_port(Sock) -> inet:getstat(Sock, Stats). +recv(Sock) when ?IS_SSL(Sock) -> + S = Sock#ssl_socket.ssl, + receive + {ssl, S, Data} -> {data, Data}; + {ssl_closed, S} -> closed; + {ssl_error, S, Reason} -> {error, Reason}; + Other -> {other, Other} + end; +recv(Sock) -> + S = Sock, + receive + {tcp, S, Data} -> {data, Data}; + {tcp_closed, S} -> closed; + {tcp_error, S, Reason} -> {error, Reason}; + Other -> {other, Other} + end. + async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) -> Pid = self(), Ref = make_ref(), diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index e210dba1..4dcb7446 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -252,82 +252,80 @@ recvloop(Deb, State = #v1{sock = Sock, recv_length = Length, buf = Buf}) -> State#v1{buf = [Rest]})) end. -mainloop(Deb, State = #v1{parent = Parent, sock = Sock}) -> - receive - {tcp, Sock, Data} -> - recvloop(Deb, State#v1{buf = [Data | State#v1.buf], - pending_recv = false}); - {tcp_closed, Sock} -> - if State#v1.connection_state =:= closed -> - State; - true -> - throw(connection_closed_abruptly) - end; - {conserve_memory, Conserve} -> - recvloop(Deb, internal_conserve_memory(Conserve, State)); - {channel_closing, ChPid} -> - ok = rabbit_channel:ready_for_close(ChPid), - channel_cleanup(ChPid), - mainloop(Deb, State); - {'EXIT', Parent, Reason} -> - terminate(io_lib:format("broker forced connection closure " - "with reason '~w'", [Reason]), State), - %% this is what we are expected to do according to - %% http://www.erlang.org/doc/man/sys.html - %% - %% If we wanted to be *really* nice we should wait for a - %% while for clients to close the socket at their end, - %% just as we do in the ordinary error case. However, - %% since this termination is initiated by our parent it is - %% probably more important to exit quickly. 
- exit(Reason); - {channel_exit, _Channel, E = {writer, send_failed, _Error}} -> - throw(E); - {channel_exit, Channel, Reason} -> - mainloop(Deb, handle_exception(State, Channel, Reason)); - {'DOWN', _MRef, process, ChPid, Reason} -> - mainloop(Deb, handle_dependent_exit(ChPid, Reason, State)); - terminate_connection -> - State; - handshake_timeout -> - if ?IS_RUNNING(State) orelse - State#v1.connection_state =:= closing orelse - State#v1.connection_state =:= closed -> - mainloop(Deb, State); - true -> - throw({handshake_timeout, State#v1.callback}) - end; - timeout -> - case State#v1.connection_state of - closed -> mainloop(Deb, State); - S -> throw({timeout, S}) - end; - {'$gen_call', From, {shutdown, Explanation}} -> - {ForceTermination, NewState} = terminate(Explanation, State), - gen_server:reply(From, ok), - case ForceTermination of - force -> ok; - normal -> mainloop(Deb, NewState) - end; - {'$gen_call', From, info} -> - gen_server:reply(From, infos(?INFO_KEYS, State)), - mainloop(Deb, State); - {'$gen_call', From, {info, Items}} -> - gen_server:reply(From, try {ok, infos(Items, State)} - catch Error -> {error, Error} - end), - mainloop(Deb, State); - {'$gen_cast', emit_stats} -> - State1 = internal_emit_stats(State), - mainloop(Deb, State1); - {system, From, Request} -> - sys:handle_system_msg(Request, From, - Parent, ?MODULE, Deb, State); - Other -> - %% internal error -> something worth dying for - exit({unexpected_message, Other}) +mainloop(Deb, State = #v1{sock = Sock}) -> + case rabbit_net:recv(Sock) of + {data, Data} -> recvloop(Deb, State#v1{buf = [Data | State#v1.buf], + pending_recv = false}); + closed -> if State#v1.connection_state =:= closed -> + State; + true -> + throw(connection_closed_abruptly) + end; + {error, Reason} -> throw({inet_error, Reason}); + {other, Other} -> handle_other(Other, Deb, State) end. +handle_other({conserve_memory, Conserve}, Deb, State) -> + recvloop(Deb, internal_conserve_memory(Conserve, State)); +handle_other({channel_closing, ChPid}, Deb, State) -> + ok = rabbit_channel:ready_for_close(ChPid), + channel_cleanup(ChPid), + mainloop(Deb, State); +handle_other({'EXIT', Parent, Reason}, _Deb, State = #v1{parent = Parent}) -> + terminate(io_lib:format("broker forced connection closure " + "with reason '~w'", [Reason]), State), + %% this is what we are expected to do according to + %% http://www.erlang.org/doc/man/sys.html + %% + %% If we wanted to be *really* nice we should wait for a while for + %% clients to close the socket at their end, just as we do in the + %% ordinary error case. However, since this termination is + %% initiated by our parent it is probably more important to exit + %% quickly. 
+ exit(Reason); +handle_other({channel_exit, _Channel, E = {writer, send_failed, _Error}}, + _Deb, _State) -> + throw(E); +handle_other({channel_exit, Channel, Reason}, Deb, State) -> + mainloop(Deb, handle_exception(State, Channel, Reason)); +handle_other({'DOWN', _MRef, process, ChPid, Reason}, Deb, State) -> + mainloop(Deb, handle_dependent_exit(ChPid, Reason, State)); +handle_other(terminate_connection, _Deb, State) -> + State; +handle_other(handshake_timeout, Deb, State) + when ?IS_RUNNING(State) orelse + State#v1.connection_state =:= closing orelse + State#v1.connection_state =:= closed -> + mainloop(Deb, State); +handle_other(handshake_timeout, _Deb, State) -> + throw({handshake_timeout, State#v1.callback}); +handle_other(timeout, Deb, State = #v1{connection_state = closed}) -> + mainloop(Deb, State); +handle_other(timeout, _Deb, #v1{connection_state = S}) -> + throw({timeout, S}); +handle_other({'$gen_call', From, {shutdown, Explanation}}, Deb, State) -> + {ForceTermination, NewState} = terminate(Explanation, State), + gen_server:reply(From, ok), + case ForceTermination of + force -> ok; + normal -> mainloop(Deb, NewState) + end; +handle_other({'$gen_call', From, info}, Deb, State) -> + gen_server:reply(From, infos(?INFO_KEYS, State)), + mainloop(Deb, State); +handle_other({'$gen_call', From, {info, Items}}, Deb, State) -> + gen_server:reply(From, try {ok, infos(Items, State)} + catch Error -> {error, Error} + end), + mainloop(Deb, State); +handle_other({'$gen_cast', emit_stats}, Deb, State) -> + mainloop(Deb, internal_emit_stats(State)); +handle_other({system, From, Request}, Deb, State = #v1{parent = Parent}) -> + sys:handle_system_msg(Request, From, Parent, ?MODULE, Deb, State); +handle_other(Other, _Deb, _State) -> + %% internal error -> something worth dying for + exit({unexpected_message, Other}). + switch_callback(State = #v1{connection_state = blocked, heartbeater = Heartbeater}, Callback, Length) -> ok = rabbit_heartbeat:pause_monitor(Heartbeater), -- cgit v1.2.1 From e5a47db3cf448c377ba067ad3b2ab0dcd0b42d52 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 15:40:14 +0100 Subject: Reinstate trap_exit in channel. --- src/rabbit_channel.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 5099bf3f..0c12614c 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -156,6 +156,7 @@ ready_for_close(Pid) -> init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, Capabilities, CollectorPid, StartLimiterFun]) -> + process_flag(trap_exit, true), ok = pg_local:join(rabbit_channels, self()), StatsTimer = rabbit_event:init_stats_timer(), State = #ch{state = starting, -- cgit v1.2.1 From 9c42e0eece5965fb7d4375842bda712015d01f40 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 7 Apr 2011 15:55:19 +0100 Subject: Grrr. non-maskable-interrupt half way through implementing txns --- src/rabbit_mirror_queue_master.erl | 105 ++++++++++++++++++++++--------------- src/rabbit_mirror_queue_slave.erl | 20 +++++-- 2 files changed, 79 insertions(+), 46 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index b0a22edd..a61c32e0 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/5]). +-export([promote_backing_queue_state/6]). -behaviour(rabbit_backing_queue). 
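An aside on the socket patch a few hunks up: the point of rabbit_net:recv/1 is that gen_tcp and ssl deliver their active-mode messages under different tags, and the reader should not have to care which transport it is sitting on. The following stand-alone sketch (not part of any patch here; the module and function names are invented) shows the same normalise-then-dispatch shape in isolation:

    -module(sock_recv_sketch).
    -export([loop/1]).

    %% Normalise the active-mode messages from gen_tcp and ssl sockets into
    %% one shape, much as rabbit_net:recv/1 does in the patch above.
    recv(Sock) when is_port(Sock) ->
        receive
            {tcp, Sock, Data}         -> {data, Data};
            {tcp_closed, Sock}        -> closed;
            {tcp_error, Sock, Reason} -> {error, Reason};
            Other                     -> {other, Other}
        end;
    recv(SslSock) ->
        receive
            {ssl, SslSock, Data}         -> {data, Data};
            {ssl_closed, SslSock}        -> closed;
            {ssl_error, SslSock, Reason} -> {error, Reason};
            Other                        -> {other, Other}
        end.

    %% The main loop then dispatches on the normalised result instead of
    %% matching raw tcp/ssl messages inline, which is what the rewritten
    %% mainloop/handle_other split achieves in rabbit_reader.
    loop(Sock) ->
        case recv(Sock) of
            {data, _Data}   -> loop(Sock);
            closed          -> ok;
            {error, Reason} -> exit({inet_error, Reason});
            {other, Msg}    -> error_logger:info_msg("other: ~p~n", [Msg]),
                               loop(Sock)
        end.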
@@ -38,7 +38,9 @@ backing_queue_state, set_delivered, seen_status, - confirmed + confirmed, + ack_msg_id, + abandoned_txns }). %% --------------------------------------------------------------------------- @@ -73,16 +75,19 @@ init(#amqqueue { arguments = Args, name = QName } = Q, Recover, backing_queue_state = BQS, set_delivered = 0, seen_status = dict:new(), - confirmed = [] }. + confirmed = [], + ack_msg_id = dict:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, AbandonedTxns) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, backing_queue_state = BQS, set_delivered = BQ:len(BQS), seen_status = SeenStatus, - confirmed = [] }. + confirmed = [], + ack_msg_id = dict:new(), + abandoned_txns = AbandonedTxns }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but @@ -119,7 +124,8 @@ publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, ChPid, State = #state { gm = GM, seen_status = SS, backing_queue = BQ, - backing_queue_state = BQS }) -> + backing_queue_state = BQS, + ack_msg_id = AM }) -> false = dict:is_key(MsgId, SS), %% ASSERTION %% Must use confirmed_broadcast here in order to guarantee that %% all slaves are forced to interpret this publish_delivered at @@ -128,7 +134,9 @@ publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, GM, {publish, {true, AckRequired}, ChPid, MsgProps, Msg}), {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - {AckTag, State #state { backing_queue_state = BQS1 }}. + AM1 = maybe_store_acktag(AckTag, MsgId, AM), + {AckTag, State #state { backing_queue_state = BQS1, + ack_msg_id = AM1 }}. dropwhile(Fun, State = #state { gm = GM, backing_queue = BQ, @@ -175,7 +183,8 @@ drain_confirmed(State = #state { backing_queue = BQ, fetch(AckRequired, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - set_delivered = SetDelivered }) -> + set_delivered = SetDelivered, + ack_msg_id = AM }) -> {Result, BQS1} = BQ:fetch(AckRequired, BQS), State1 = State #state { backing_queue_state = BQS1 }, case Result of @@ -186,53 +195,60 @@ fetch(AckRequired, State = #state { gm = GM, ok = gm:broadcast(GM, {fetch, AckRequired, MsgId, Remaining}), IsDelivered1 = IsDelivered orelse SetDelivered > 0, SetDelivered1 = lists:max([0, SetDelivered - 1]), + AM1 = maybe_store_acktag(AckTag, MsgId, AM), {{Message, IsDelivered1, AckTag, Remaining}, - State1 #state { set_delivered = SetDelivered1 }} + State1 #state { set_delivered = SetDelivered1, + ack_msg_id = AM1 }} end. ack(AckTags, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> + backing_queue_state = BQS, + ack_msg_id = AM }) -> {MsgIds, BQS1} = BQ:ack(AckTags, BQS), + AM1 = lists:foldl(fun dict:erase/2, AM, AckTags), case MsgIds of [] -> ok; _ -> ok = gm:broadcast(GM, {ack, MsgIds}) end, - {MsgIds, State #state { backing_queue_state = BQS1 }}. - -tx_publish(Txn, Msg, MsgProps, ChPid, #state {} = State) -> - %% gm:broadcast(GM, {tx_publish, Txn, MsgId, MsgProps, ChPid}) - State. + {MsgIds, State #state { backing_queue_state = BQS1, + ack_msg_id = AM1 }}. + +tx_publish(Txn, Msg, MsgProps, ChPid, + State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {tx_publish, Txn, ChPid, MsgProps, Msg}), + BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, State), + State #state { backing_queue_state = BQS1 }. 
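The hunks above thread a new ack_msg_id dictionary through the master so that, when acks come back, the corresponding message ids can be rebroadcast to the mirrors. A minimal sketch of that bookkeeping on its own (module and function names invented for illustration):

    -module(ack_map_sketch).
    -export([new/0, record/3, settle/2]).

    new() -> dict:new().

    %% Remember which message id an outstanding ack tag refers to. An
    %% 'undefined' tag means no ack is expected, so nothing is stored -
    %% the same guard as maybe_store_acktag/3 later in this patch.
    record(undefined, _MsgId, AM) -> AM;
    record(AckTag, MsgId, AM)     -> dict:store(AckTag, MsgId, AM).

    %% Turn a batch of ack tags back into message ids (suitable for a
    %% gm broadcast of {ack, MsgIds}) and prune the map.
    settle(AckTags, AM) ->
        MsgIds = [dict:fetch(T, AM) || T <- AckTags, dict:is_key(T, AM)],
        {MsgIds, lists:foldl(fun dict:erase/2, AM, AckTags)}.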
-tx_ack(Txn, AckTags, #state {} = State) -> - %% gm:broadcast(GM, {tx_ack, Txn, MsgIds}) +tx_ack(Txn, AckTags, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + ack_msg_id = AM }) -> + MsgIds = lists:foldl( + fun (AckTag, Acc) -> [dict:fetch(AckTag, AM) | Acc] end, + [], AckTags), + ok = gm:broadcast(GM, {tx_ack, Txn, MsgIds}) State. -tx_rollback(Txn, #state {} = State) -> - %% gm:broadcast(GM, {tx_rollback, Txn}) - {[], State}. - -tx_commit(Txn, PostCommitFun, MsgPropsFun, #state {} = State) -> - %% Maybe don't want to transmit the MsgPropsFun but what choice do - %% we have? OTOH, on the slaves, things won't be expiring on their - %% own (props are interpreted by amqqueue, not vq), so if the msg - %% props aren't quite the same, that doesn't matter. - %% - %% The PostCommitFun is actually worse - we need to prevent that - %% from being invoked until we have confirmation from all the - %% slaves that they've done everything up to there. - %% - %% In fact, transactions are going to need work seeing as it's at - %% this point that VQ mentions amqqueue, which will thus not work - %% on the slaves - we need to make sure that all the slaves do the - %% tx_commit_post_msg_store at the same point, and then when they - %% all confirm that (scatter/gather), we can finally invoke the - %% PostCommitFun. - %% - %% Another idea is that the slaves are actually driven with - %% pubacks and thus only the master needs to support txns - %% directly. - {[], State}. +tx_rollback(Txn, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), + {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), + {AckTags, State #state { backing_queue_state = BQS1 }}. + +tx_commit(Txn, PostCommitFun, MsgPropsFun, + State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS, + ack_msg_id = AM }) -> + ok = gm:confirmed_broadcast(GM, {tx_commit, Txn, MsgPropsFun}), + {AckTags, BQS1} = BQ:tx_commit(Txn, PostCommitFun, MsgPropsFun, BQS), + AM1 = lists:foldl(fun dict:erase/2, AM, AckTags), + {AckTags, State #state { backing_queue_state = BQS, + ack_msg_id = AM }}. requeue(AckTags, MsgPropsFun, State = #state { gm = GM, backing_queue = BQ, @@ -323,3 +339,8 @@ discard(Msg = #basic_message {}, ChPid, backing_queue_state = BQS }) -> ok = gm:broadcast(GM, {discard, ChPid, Msg}), State#state{backing_queue_state = BQ:discard(Msg, ChPid, BQS)}. + +maybe_store_acktag(undefined, _MsgId, AM) -> + AM; +maybe_store_acktag(AckTag, MsgId, AM) -> + dict:store(AckTag, MsgId, AM). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 628135b1..21a33341 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -55,7 +55,8 @@ sender_queues, %% :: Pid -> MsgQ msg_id_ack, %% :: MsgId -> AckTag - msg_id_status + msg_id_status, + open_transactions }). -define(SYNC_INTERVAL, 25). %% milliseconds @@ -105,7 +106,8 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), msg_id_ack = dict:new(), - msg_id_status = dict:new() + msg_id_status = dict:new(), + open_transactions = sets:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
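The slave's new open_transactions field exists for one reason: a promoted slave cannot complete transactions that were in flight on the old master, so on promotion it has to roll back whatever is still open, as the promote_me hunk below does. A stripped-down sketch of that lifecycle (names invented; BroadcastFun stands in for gm:confirmed_broadcast/2):

    -module(open_txn_sketch).
    -export([new/0, saw/2, finished/2, rollback_all/2]).

    new() -> sets:new().

    %% First tx_* instruction seen for Txn via gm: remember it is open.
    saw(Txn, Open) -> sets:add_element(Txn, Open).

    %% A commit or rollback closes it again.
    finished(Txn, Open) -> sets:del_element(Txn, Open).

    %% On promotion, anything still open can never complete normally, so
    %% tell the remaining mirrors to roll it back.
    rollback_all(BroadcastFun, Open) ->
        lists:foreach(fun (Txn) -> BroadcastFun({tx_rollback, Txn}) end,
                      sets:to_list(Open)),
        new().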
@@ -358,7 +360,8 @@ promote_me(From, #state { q = Q, rate_timer_ref = RateTRef, sender_queues = SQ, msg_id_ack = MA, - msg_id_status = MS }) -> + msg_id_status = MS, + open_transactions = OT }) -> rabbit_log:info("Promoting slave ~p for ~s~n", [self(), rabbit_misc:rs(Q #amqqueue.name)]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), @@ -366,6 +369,11 @@ promote_me(From, #state { q = Q, gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), + %% Start by rolling back all open transactions + + [ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}) + || Txn <- sets:to_list(OT)], + %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion %% then we pass them to the @@ -380,7 +388,7 @@ promote_me(From, #state { q = Q, %% affect confirmations: if the message was previously pending a %% confirmation then it still will be, under the same msg_id. So %% as a master, we need to be prepared to filter out the - %% publication of said messages from the channel (validate_message + %% publication of said messages from the channel (is_duplicate %% (thus such requeued messages must remain in the msg_id_status %% (MS) which becomes seen_status (SS) in the master)). %% @@ -424,6 +432,10 @@ promote_me(From, #state { q = Q, %% those messages are then requeued. However, as discussed above, %% this does not affect MS, nor which bits go through to SS in %% Master, or MTC in queue_process. + %% + %% Everything that's in MA gets requeued. Consequently the new + %% master should start with a fresh AM as there are no messages + %% pending acks (txns will have been rolled back). MSList = dict:to_list(MS), SS = dict:from_list( -- cgit v1.2.1 From ae69d3fdd9930d43baf132267c54330655a08a4b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 16:16:09 +0100 Subject: Do loop detection earlier. --- src/rabbit_log.erl | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index ba3bd234..dfa61b15 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -86,11 +86,12 @@ tap_trace_in(Message = #basic_message{exchange_name = #resource{ virtual_host = VHostBin, name = XNameBin}}) -> check_trace( + XNameBin, VHostBin, fun (TraceExchangeBin) -> {EncodedMetadata, Payload} = message_to_table(Message), - maybe_inject(TraceExchangeBin, VHostBin, XNameBin, - <<"publish">>, XNameBin, EncodedMetadata, Payload) + inject(TraceExchangeBin, VHostBin, <<"publish">>, + XNameBin, EncodedMetadata, Payload) end). tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, @@ -100,6 +101,7 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, DeliveryTag, ConsumerTagOrNone) -> check_trace( + XNameBin, VHostBin, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, @@ -114,32 +116,27 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, [{<<"consumer_tag">>, longstr, ConsumerTag} | Fields0] end, - maybe_inject(TraceExchangeBin, VHostBin, XNameBin, - <<"deliver">>, QNameBin, Fields, Payload) + inject(TraceExchangeBin, VHostBin, <<"deliver">>, + QNameBin, Fields, Payload) end). 
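The tap publishes with routing keys of the form publish.<exchange name> on the way in and deliver.<queue name> on the way out. A consumer bound to the trace exchange can recover the direction and the original name with a couple of binary patterns; names containing dots stay unambiguous because only the fixed prefix is matched. This helper is purely illustrative and not part of the patch:

    classify_trace_rk(<<"publish.", XNameBin/binary>>) -> {published_to, XNameBin};
    classify_trace_rk(<<"deliver.", QNameBin/binary>>) -> {delivered_from, QNameBin};
    classify_trace_rk(_Other)                          -> unknown.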
-check_trace(VHostBin, F) -> +check_trace(XNameBin, VHostBin, F) -> case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of undefined -> ok; + {ok, XNameBin} -> ok; {ok, TraceExchangeBin} -> F(TraceExchangeBin) end of {'EXIT', Reason} -> info("Trace tap died with reason ~p~n", [Reason]); ok -> ok end. -maybe_inject(TraceExchangeBin, VHostBin, OriginalExchangeBin, - RKPrefix, RKSuffix, Table, Payload) -> - if - TraceExchangeBin =:= OriginalExchangeBin -> - ok; - true -> - rabbit_basic:publish( - rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), - <>, - #'P_basic'{headers = Table}, - Payload), - ok - end. +inject(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> + rabbit_basic:publish( + rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), + <>, + #'P_basic'{headers = Table}, + Payload), + ok. message_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, -- cgit v1.2.1 From eb4e5406e057c332ae5bd3537237bd6469f938b7 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 16:54:41 +0100 Subject: This seems to work fine. FIXME denied! --- src/rabbit_log.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index dfa61b15..90e8fdd6 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -106,7 +106,7 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, {EncodedMetadata, Payload} = message_to_table(Message), - Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, %% FIXME later + Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, {<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, Fields = case ConsumerTagOrNone of -- cgit v1.2.1 From cddec35bce9389cb4744ef4778979191ee00913b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 16:59:44 +0100 Subject: Cosmetic. --- src/rabbit_log.erl | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 90e8fdd6..8cd980ea 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -82,22 +82,22 @@ error(Fmt) -> error(Fmt, Args) when is_list(Args) -> gen_server:cast(?SERVER, {error, Fmt, Args}). -tap_trace_in(Message = #basic_message{exchange_name = #resource{ - virtual_host = VHostBin, - name = XNameBin}}) -> +tap_trace_in(Message = #basic_message{ + exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}) -> check_trace( XNameBin, VHostBin, fun (TraceExchangeBin) -> {EncodedMetadata, Payload} = message_to_table(Message), - inject(TraceExchangeBin, VHostBin, <<"publish">>, - XNameBin, EncodedMetadata, Payload) + publish(TraceExchangeBin, VHostBin, <<"publish">>, XNameBin, + EncodedMetadata, Payload) end). 
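The extra {ok, XNameBin} clause in check_trace above is the loop detection the commit title refers to: if a message was published to the trace exchange itself, tracing it would publish straight back into the same exchange and spin forever, so that case is skipped before the callback ever runs. The guard relies on Erlang's repeated-variable matching; written as a stand-alone predicate over the result of the application:get_env lookup (illustrative only):

    should_trace(_XNameBin, undefined)           -> no_trace;   %% tapping not enabled
    should_trace(XNameBin, {ok, XNameBin})       -> no_trace;   %% message went to the tap itself
    should_trace(_XNameBin, {ok, TraceExchange}) -> {trace, TraceExchange}.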
tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, - Message = #basic_message{exchange_name = #resource{ - virtual_host = VHostBin, - name = XNameBin}}}, + Message = #basic_message{ + exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}}, DeliveryTag, ConsumerTagOrNone) -> check_trace( @@ -110,14 +110,12 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, {<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, Fields = case ConsumerTagOrNone of - none -> - Fields0; - ConsumerTag -> - [{<<"consumer_tag">>, longstr, ConsumerTag} - | Fields0] + none -> Fields0; + CTag -> [{<<"consumer_tag">>, longstr, CTag} | + Fields0] end, - inject(TraceExchangeBin, VHostBin, <<"deliver">>, - QNameBin, Fields, Payload) + publish(TraceExchangeBin, VHostBin, <<"deliver">>, QNameBin, + Fields, Payload) end). check_trace(XNameBin, VHostBin, F) -> @@ -130,12 +128,10 @@ check_trace(XNameBin, VHostBin, F) -> ok -> ok end. -inject(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> - rabbit_basic:publish( - rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), - <>, - #'P_basic'{headers = Table}, - Payload), +publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> + rabbit_basic:publish(rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), + <>, + #'P_basic'{headers = Table}, Payload), ok. message_to_table(#basic_message{exchange_name = #resource{name = XName}, -- cgit v1.2.1 From 6d4298a3b17534cf7edced9ef5ff21f4c9351732 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 17:06:29 +0100 Subject: Cosmetic. --- src/rabbit_control.erl | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 1ec36e49..6ab07111 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -282,17 +282,18 @@ action(list_consumers, Node, _Args, Opts, Inform) -> Other -> Other end; -action(set_env, Node, [VarStr, TermStr], _Opts, Inform) -> - Inform("Setting control variable ~s for node ~p to ~s", [VarStr, Node, TermStr]), - rpc_call(Node, application, set_env, [rabbit, parse_term(VarStr), parse_term(TermStr)]); +action(set_env, Node, [Var, Term], _Opts, Inform) -> + Inform("Setting control variable ~s for node ~p to ~s", [Var, Node, Term]), + rpc_call(Node, application, set_env, [rabbit, parse(Var), parse(Term)]); -action(get_env, Node, [VarStr], _Opts, Inform) -> - Inform("Getting control variable ~s for node ~p", [VarStr, Node]), - io:format("~p~n", [rpc_call(Node, application, get_env, [rabbit, parse_term(VarStr)])]); +action(get_env, Node, [Var], _Opts, Inform) -> + Inform("Getting control variable ~s for node ~p", [Var, Node]), + Val = rpc_call(Node, application, get_env, [rabbit, parse(Var)]), + io:format("~p~n", [Val]); -action(unset_env, Node, [VarStr], _Opts, Inform) -> - Inform("Clearing control variable ~s for node ~p", [VarStr, Node]), - rpc_call(Node, application, unset_env, [rabbit, parse_term(VarStr)]); +action(unset_env, Node, [Var], _Opts, Inform) -> + Inform("Clearing control variable ~s for node ~p", [Var, Node]), + rpc_call(Node, application, unset_env, [rabbit, parse(Var)]); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), @@ -337,7 +338,7 @@ default_if_empty(List, Default) when is_list(List) -> true -> [list_to_atom(X) || X <- List] end. 
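The set_env/get_env/unset_env actions above take both the variable name and the value as the string form of an Erlang term, which is what makes the per-vhost key used by check_trace, {trace_exchange, VHostBin}, expressible from the command line. Roughly what happens on the broker node once those strings arrive (Erlang shell transcript; the default vhost <<"/">> and the exchange name <<"tap">> are just example values):

    1> {ok, Tokens, _} = erl_scan:string("{trace_exchange, <<\"/\">>}."),
    1> {ok, Key} = erl_parse:parse_term(Tokens).
    {ok,{trace_exchange,<<"/">>}}
    2> application:set_env(rabbit, Key, <<"tap">>).
    ok

rabbitmqctl performs the same set_env over rpc, so the running broker picks the value up on the next check_trace call.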
-parse_term(Str) -> +parse(Str) -> {ok, Tokens, _} = erl_scan:string(Str ++ "."), {ok, Term} = erl_parse:parse_term(Tokens), Term. -- cgit v1.2.1 From b169b18c7551646ada80d8adec78138d18cc525d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 17:11:07 +0100 Subject: Tracing has very little to do with rabbit_log, let's make it its own module. --- src/rabbit_channel.erl | 6 +-- src/rabbit_log.erl | 99 -------------------------------------- src/rabbit_trace.erl | 126 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 102 deletions(-) create mode 100644 src/rabbit_trace.erl diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e3dc47dc..6ec2a09f 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -280,7 +280,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - rabbit_log:tap_trace_out(Msg, DeliveryTag, ConsumerTag), + rabbit_trace:tap_trace_out(Msg, DeliveryTag, ConsumerTag), noreply(State1#ch{next_tag = DeliveryTag + 1}); handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> @@ -604,7 +604,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> - rabbit_log:tap_trace_in(Message), + rabbit_trace:tap_trace_in(Message), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -673,7 +673,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), - rabbit_log:tap_trace_out(Msg, DeliveryTag, none), + rabbit_trace:tap_trace_out(Msg, DeliveryTag, none), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl index 8cd980ea..8207d6bc 100644 --- a/src/rabbit_log.erl +++ b/src/rabbit_log.erl @@ -26,11 +26,6 @@ -export([debug/1, debug/2, message/4, info/1, info/2, warning/1, warning/2, error/1, error/2]). --export([tap_trace_in/1, tap_trace_out/3]). - --include("rabbit.hrl"). --include("rabbit_framing.hrl"). - -define(SERVER, ?MODULE). %%---------------------------------------------------------------------------- @@ -82,100 +77,6 @@ error(Fmt) -> error(Fmt, Args) when is_list(Args) -> gen_server:cast(?SERVER, {error, Fmt, Args}). -tap_trace_in(Message = #basic_message{ - exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}) -> - check_trace( - XNameBin, - VHostBin, - fun (TraceExchangeBin) -> - {EncodedMetadata, Payload} = message_to_table(Message), - publish(TraceExchangeBin, VHostBin, <<"publish">>, XNameBin, - EncodedMetadata, Payload) - end). - -tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, - Message = #basic_message{ - exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}}, - DeliveryTag, - ConsumerTagOrNone) -> - check_trace( - XNameBin, - VHostBin, - fun (TraceExchangeBin) -> - RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - {EncodedMetadata, Payload} = message_to_table(Message), - Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, - {<<"redelivered">>, signedint, RedeliveredNum}] - ++ EncodedMetadata, - Fields = case ConsumerTagOrNone of - none -> Fields0; - CTag -> [{<<"consumer_tag">>, longstr, CTag} | - Fields0] - end, - publish(TraceExchangeBin, VHostBin, <<"deliver">>, QNameBin, - Fields, Payload) - end). 
- -check_trace(XNameBin, VHostBin, F) -> - case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of - undefined -> ok; - {ok, XNameBin} -> ok; - {ok, TraceExchangeBin} -> F(TraceExchangeBin) - end of - {'EXIT', Reason} -> info("Trace tap died with reason ~p~n", [Reason]); - ok -> ok - end. - -publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> - rabbit_basic:publish(rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), - <>, - #'P_basic'{headers = Table}, Payload), - ok. - -message_to_table(#basic_message{exchange_name = #resource{name = XName}, - routing_keys = RoutingKeys, - content = Content}) -> - #content{properties = #'P_basic'{content_type = ContentType, - content_encoding = ContentEncoding, - headers = Headers, - delivery_mode = DeliveryMode, - priority = Priority, - correlation_id = CorrelationId, - reply_to = ReplyTo, - expiration = Expiration, - message_id = MessageId, - timestamp = Timestamp, - type = Type, - user_id = UserId, - app_id = AppId}, - payload_fragments_rev = PFR} = - rabbit_binary_parser:ensure_content_decoded(Content), - Headers1 = prune_undefined( - [{<<"content_type">>, longstr, ContentType}, - {<<"content_encoding">>, longstr, ContentEncoding}, - {<<"headers">>, table, Headers}, - {<<"delivery_mode">>, signedint, DeliveryMode}, - {<<"priority">>, signedint, Priority}, - {<<"correlation_id">>, longstr, CorrelationId}, - {<<"reply_to">>, longstr, ReplyTo}, - {<<"expiration">>, longstr, Expiration}, - {<<"message_id">>, longstr, MessageId}, - {<<"timestamp">>, longstr, Timestamp}, - {<<"type">>, longstr, Type}, - {<<"user_id">>, longstr, UserId}, - {<<"app_id">>, longstr, AppId}]), - {[{<<"exchange_name">>, longstr, XName}, - {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"headers">>, table, Headers1}, - {<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], - list_to_binary(lists:reverse(PFR))}. - -prune_undefined(Fields) -> - [F || F = {_, _, Value} <- Fields, - Value =/= undefined]. - %%-------------------------------------------------------------------- init([]) -> {ok, none}. diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl new file mode 100644 index 00000000..8f531808 --- /dev/null +++ b/src/rabbit_trace.erl @@ -0,0 +1,126 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_trace). + +-export([tap_trace_in/1, tap_trace_out/3]). + +-include("rabbit.hrl"). +-include("rabbit_framing.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +%% TODO + +-endif. 
+ +%%---------------------------------------------------------------------------- + +tap_trace_in(Message = #basic_message{ + exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}) -> + check_trace( + XNameBin, + VHostBin, + fun (TraceExchangeBin) -> + {EncodedMetadata, Payload} = message_to_table(Message), + publish(TraceExchangeBin, VHostBin, <<"publish">>, XNameBin, + EncodedMetadata, Payload) + end). + +tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, + Message = #basic_message{ + exchange_name = #resource{virtual_host = VHostBin, + name = XNameBin}}}, + DeliveryTag, + ConsumerTagOrNone) -> + check_trace( + XNameBin, + VHostBin, + fun (TraceExchangeBin) -> + RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + {EncodedMetadata, Payload} = message_to_table(Message), + Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, + {<<"redelivered">>, signedint, RedeliveredNum}] + ++ EncodedMetadata, + Fields = case ConsumerTagOrNone of + none -> Fields0; + CTag -> [{<<"consumer_tag">>, longstr, CTag} | + Fields0] + end, + publish(TraceExchangeBin, VHostBin, <<"deliver">>, QNameBin, + Fields, Payload) + end). + +check_trace(XNameBin, VHostBin, F) -> + case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of + undefined -> ok; + {ok, XNameBin} -> ok; + {ok, TraceExchangeBin} -> F(TraceExchangeBin) + end of + {'EXIT', Reason} -> rabbit_log:info("Trace tap died: ~p~n", [Reason]); + ok -> ok + end. + +publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> + rabbit_basic:publish(rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), + <>, + #'P_basic'{headers = Table}, Payload), + ok. + +message_to_table(#basic_message{exchange_name = #resource{name = XName}, + routing_keys = RoutingKeys, + content = Content}) -> + #content{properties = #'P_basic'{content_type = ContentType, + content_encoding = ContentEncoding, + headers = Headers, + delivery_mode = DeliveryMode, + priority = Priority, + correlation_id = CorrelationId, + reply_to = ReplyTo, + expiration = Expiration, + message_id = MessageId, + timestamp = Timestamp, + type = Type, + user_id = UserId, + app_id = AppId}, + payload_fragments_rev = PFR} = + rabbit_binary_parser:ensure_content_decoded(Content), + Headers1 = prune_undefined( + [{<<"content_type">>, longstr, ContentType}, + {<<"content_encoding">>, longstr, ContentEncoding}, + {<<"headers">>, table, Headers}, + {<<"delivery_mode">>, signedint, DeliveryMode}, + {<<"priority">>, signedint, Priority}, + {<<"correlation_id">>, longstr, CorrelationId}, + {<<"reply_to">>, longstr, ReplyTo}, + {<<"expiration">>, longstr, Expiration}, + {<<"message_id">>, longstr, MessageId}, + {<<"timestamp">>, longstr, Timestamp}, + {<<"type">>, longstr, Type}, + {<<"user_id">>, longstr, UserId}, + {<<"app_id">>, longstr, AppId}]), + {[{<<"exchange_name">>, longstr, XName}, + {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]}, + {<<"headers">>, table, Headers1}, + {<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], + list_to_binary(lists:reverse(PFR))}. + +prune_undefined(Fields) -> + [F || F = {_, _, Value} <- Fields, + Value =/= undefined]. 
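Everything message_to_table flattens out ends up in the headers of the message published to the trace exchange, so a consumer does not need to parse the payload at all to know where a message came from. A sketch of pulling a few fields back out, assuming the rabbit_framing include for #'P_basic'{} and the rabbit_misc:table_lookup/2 helper (both are assumptions of this sketch, not something the patch adds):

    %% -include("rabbit_framing.hrl").
    summarise_trace(#'P_basic'{headers = Headers}) ->
        {longstr, XNameBin} = rabbit_misc:table_lookup(Headers, <<"exchange_name">>),
        {array, RKs}        = rabbit_misc:table_lookup(Headers, <<"routing_key">>),
        {table, Props}      = rabbit_misc:table_lookup(Headers, <<"headers">>),
        {XNameBin, [K || {longstr, K} <- RKs], Props}.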
-- cgit v1.2.1 From f468d313fb5f4e8ba8c25cae1670c2fa9c56930c Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 7 Apr 2011 17:28:22 +0100 Subject: changelog entries for 2.4.1 --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index 45af770a..f9e9df8b 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -120,6 +120,9 @@ done rm -rf %{buildroot} %changelog +* Thu Apr 7 2011 Alexandru Scvortov 2.4.1-1 +- New Upstream Release + * Tue Mar 22 2011 Alexandru Scvortov 2.4.0-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index 2ca5074f..0383b955 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (2.4.1-1) lucid; urgency=low + + * New Upstream Release + + -- Alexandru Scvortov Thu, 07 Apr 2011 16:49:22 +0100 + rabbitmq-server (2.4.0-1) lucid; urgency=low * New Upstream Release -- cgit v1.2.1 From 7003dacafce0920bc019edf41bb47e7a0fe5663a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Apr 2011 17:29:09 +0100 Subject: Port and rewrite the manpage changes. --- docs/rabbitmqctl.1.xml | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 3550e5ea..62869158 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1264,6 +1264,67 @@ + + + Configuration variables + + Some configuration values can be changed at run time. Note + that this does not apply to all variables; many are only read + at startup - changing them will have no effect. + + + + set_env variable value + + + + variable + The name of the variable to set, as the string form of an Erlang term. + + + value + The value to set it to, as the string form of an Erlang term. + + + + Set the value of a configuration variable. + + + + + + get_env variable + + + + variable + The name of the variable to get, as the string form of an Erlang term. + + + + Get the value of a configuration variable, printing either + {ok,Value} or undefined. + + + + + + unset_env variable + + + + variable + The name of the variable to clear, as the string form of an Erlang term. + + + + Clear the value of a configuration variable. + + + + + + -- cgit v1.2.1 -- cgit v1.2.1 From a80715423df8ae8904d1de86864e88c4a8e75c3d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 7 Apr 2011 18:19:16 +0100 Subject: Well txns are still only half in. But I want to go home. --- include/rabbit_backing_queue_spec.hrl | 3 +- src/rabbit_amqqueue_process.erl | 4 +- src/rabbit_backing_queue.erl | 7 +-- src/rabbit_mirror_queue_master.erl | 92 +++++++++++++++++++++++++---------- src/rabbit_mirror_queue_slave.erl | 69 ++++++++++++++++++++++---- src/rabbit_variable_queue.erl | 4 +- 6 files changed, 135 insertions(+), 44 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index b0c5f13b..d9296bf6 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -71,6 +71,7 @@ -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). 
--spec(is_duplicate/2 :: (rabbit_types:basic_message(), state()) -> +-spec(is_duplicate/3 :: + (rabbit_types:txn(), rabbit_types:basic_message(), state()) -> {'false'|'published'|'discarded', state()}). -spec(discard/3 :: (rabbit_types:basic_message(), pid(), state()) -> state()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 79f6472d..d9be4909 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -523,7 +523,7 @@ attempt_delivery(Delivery = #delivery{txn = none, immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, - case BQ:is_duplicate(Message, BQS) of + case BQ:is_duplicate(none, Message, BQS) of {false, BQS1} -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -561,7 +561,7 @@ attempt_delivery(Delivery = #delivery{txn = Txn, message = Message}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> Confirm = should_confirm_message(Delivery, State), - case BQ:is_duplicate(Message, BQS) of + case BQ:is_duplicate(Txn, Message, BQS) of {false, BQS1} -> store_ch_record((ch_record(ChPid))#cr{txn = Txn}), BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 0bbbd559..0955a080 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -175,12 +175,13 @@ behaviour_info(callbacks) -> %% the BQ to signal that it's already seen this message (and in %% what capacity - i.e. was it published previously or discarded %% previously) and thus the message should be dropped. - {is_duplicate, 2}, + {is_duplicate, 3}, %% Called to inform the BQ about messages which have reached the %% queue, but are not going to be further passed to BQ for some - %% reason. Note that this is not invoked for messages for which - %% BQ:is_duplicate/2 has already returned {true, BQS}. + %% reason. Note that this is may be invoked for messages for + %% which BQ:is_duplicate/2 has already returned {'published' | + %% 'discarded', BQS}. {discard, 3} ]; behaviour_info(_Other) -> diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index a61c32e0..8714c44d 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/2, discard/3]). + status/1, invoke/3, is_duplicate/3, discard/3]). -export([start/1, stop/0]). @@ -217,38 +217,59 @@ ack(AckTags, State = #state { gm = GM, tx_publish(Txn, Msg, MsgProps, ChPid, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {tx_publish, Txn, ChPid, MsgProps, Msg}), - BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, State), - State #state { backing_queue_state = BQS1 }. + backing_queue_state = BQS, + abandoned_txns = AbandonedTxns }) -> + case sets:is_element(Txn, AbandonedTxns) of + true -> State; + false -> ok = gm:broadcast(GM, {tx_publish, Txn, ChPid, MsgProps, Msg}), + BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, State), + State #state { backing_queue_state = BQS1 } + end. tx_ack(Txn, AckTags, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, - ack_msg_id = AM }) -> - MsgIds = lists:foldl( - fun (AckTag, Acc) -> [dict:fetch(AckTag, AM) | Acc] end, - [], AckTags), - ok = gm:broadcast(GM, {tx_ack, Txn, MsgIds}) - State. 
+ ack_msg_id = AM, + abandoned_txns = AbandonedTxns }) -> + case sets:is_element(Txn, AbandonedTxns) of + true -> + State; + false -> + MsgIds = lists:foldl( + fun (AckTag, Acc) -> [dict:fetch(AckTag, AM) | Acc] end, + [], AckTags), + ok = gm:broadcast(GM, {tx_ack, Txn, MsgIds}), + State + end. tx_rollback(Txn, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), - {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), - {AckTags, State #state { backing_queue_state = BQS1 }}. + backing_queue_state = BQS, + abandoned_txns = AbandonedTxns }) -> + case sets:is_element(Txn, AbandonedTxns) of + true -> {[], State}; + false -> ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), + {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), + {AckTags, State #state { backing_queue_state = BQS1 }} + end. tx_commit(Txn, PostCommitFun, MsgPropsFun, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS, ack_msg_id = AM }) -> - ok = gm:confirmed_broadcast(GM, {tx_commit, Txn, MsgPropsFun}), - {AckTags, BQS1} = BQ:tx_commit(Txn, PostCommitFun, MsgPropsFun, BQS), - AM1 = lists:foldl(fun dict:erase/2, AM, AckTags), - {AckTags, State #state { backing_queue_state = BQS, - ack_msg_id = AM }}. + case sets:is_element(Txn, AbandonedTxns) of + true -> + %% Don't worry - the channel will explode as it'll still + %% try to commit on the old master. + {[], State}; + false -> + ok = gm:confirmed_broadcast(GM, {tx_commit, Txn, MsgPropsFun}), + {AckTags, BQS1} = BQ:tx_commit(Txn, PostCommitFun, MsgPropsFun, BQS), + AM1 = lists:foldl(fun dict:erase/2, AM, AckTags), + {AckTags, State #state { backing_queue_state = BQS, + ack_msg_id = AM }} + end. requeue(AckTags, MsgPropsFun, State = #state { gm = GM, backing_queue = BQ, @@ -291,7 +312,7 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. -is_duplicate(Message = #basic_message { id = MsgId }, +is_duplicate(none, Message = #basic_message { id = MsgId }, State = #state { seen_status = SS, backing_queue = BQ, backing_queue_state = BQS, @@ -330,15 +351,34 @@ is_duplicate(Message = #basic_message { id = MsgId }, {published, State #state { seen_status = dict:erase(MsgId, SS), confirmed = [MsgId | Confirmed] }}; {ok, discarded} -> - {discarded, State #state { seen_status = dict:erase(MsgId, SS) }} + %% Don't erase from SS here because discard/2 is about to + %% be called and we need to be able to detect this case + {discarded, State} + end; +is_duplicate(Txn, _Msg, State = #state { abandoned_txns = AbandonedTxns }) -> + %% There will be nothing in seen_status for any transactions that + %% are still in flight. + case sets:is_element(Txn, AbandonedTxns) of + true -> {published, State}; + false -> {false, State} end. -discard(Msg = #basic_message {}, ChPid, +discard(Msg = #basic_message { id = MsgId }, ChPid, State = #state { gm = GM, backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, {discard, ChPid, Msg}), - State#state{backing_queue_state = BQ:discard(Msg, ChPid, BQS)}. + backing_queue_state = BQS, + seen_status = SS }) -> + %% It's a massive error if we get told to discard something that's + %% already been published or published-and-confirmed. To do that + %% would require non FIFO access... 
+ case dict:find(MsgId, SS) of + error -> + ok = gm:broadcast(GM, {discard, ChPid, Msg}), + State #state { backing_queue_state = BQ:discard(Msg, ChPid, BQS), + seen_status = dict:erase(MsgId, SS) }; + discarded -> + State + end. maybe_store_acktag(undefined, _MsgId, AM) -> AM; diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 21a33341..34ec5109 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -107,7 +107,7 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), msg_id_ack = dict:new(), msg_id_status = dict:new(), - open_transactions = sets:new() + open_transactions = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -157,8 +157,32 @@ handle_call({gm_deaths, Deaths}, From, end; handle_call({run_backing_queue, Mod, Fun}, _From, State) -> - reply(ok, run_backing_queue(Mod, Fun, State)). + reply(ok, run_backing_queue(Mod, Fun, State)); +handle_call({commit, Txn, ChPid}, From, + State = #state { open_transactions = OT }) -> + case dict:find(Txn, OT) of + error -> + %% curious. We've not received _anything_ about this txn + %% so far via gm! + OT1 = dict:store(Txn, {undefined, {committed, From}}, OT), + noreply(State #state { open_transactions = OT1 }); + {ok, {committed, undefined}} -> + %% We've already finished via GM (our BQ has actually + %% replied back to us in the case of commit), so just + %% reply and tidy up. Note that because no one can every + %% consume from a slave, there are never going to be any + %% acks to return. + reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }); + {ok, {open, undefined}} -> + %% Save who we're from, but we're still waiting for the + %% commit to arrive via GM + OT1 = dict:store(Txn, {open, {committed, From}}, OT), + noreply(State #state { open_transactions = OT1 }); + {ok, {abandoned, undefined}} -> + %% GM must have told us to roll back. + reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }) + end. handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); @@ -192,7 +216,25 @@ handle_cast(update_ram_duration, handle_cast(sync_timeout, State) -> noreply(backing_queue_idle_timeout( - State #state { sync_timer_ref = undefined })). + State #state { sync_timer_ref = undefined })); + +handle_cast({rollback, Txn, ChPid}, + State #state { open_transactions = OT }) -> + %% Will never see {'committed', _} or {_, 'abandoned'} or + %% {_, {'committed', From}} here + case dict:find(Txn, OT) of + error -> + %% odd. We've not received anything from GM about this. + OT1 = dict:store(Txn, {undefined, abandoned}, OT), + noreply(State #state { open_transactions = OT1 }); + {ok, {open, undefined}} -> + %% The rollback is yet to arrive via GM. + OT1 = dict:store(Txn, {open, abandoned}, OT), + noreply(State #state { open_transactions = OT1 }); + {ok, {abandoned, undefined}} -> + %% GM has already rolled back. Tidy up. + noreply(State #state { open_transactions = dict:erase(Txn, OT) }) + end. 
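The commit and rollback clauses above are all instances of one pattern: the slave hears about the same transaction twice, once directly from the channel and once as an instruction relayed through gm, and it generally cannot reply to the channel's commit until the matching instruction has arrived via gm. Stripped of the tx-specific detail, the rendezvous looks something like this (names invented for the sketch):

    %% Whichever notification arrives first is parked under the txn id;
    %% the second one completes the pair and lets the caller reply.
    rendezvous(Txn, Event, Pending) ->
        case dict:find(Txn, Pending) of
            error       -> {wait, dict:store(Txn, Event, Pending)};
            {ok, Other} -> {complete, {Event, Other}, dict:erase(Txn, Pending)}
        end.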
handle_info(timeout, State) -> noreply(backing_queue_idle_timeout(State)); @@ -370,9 +412,12 @@ promote_me(From, #state { q = Q, ok = gm:confirmed_broadcast(GM, heartbeat), %% Start by rolling back all open transactions - - [ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}) - || Txn <- sets:to_list(OT)], + BQS1 = lists:foldl( + fun (Txn, BQSN) -> + ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), + {_AckTags, BQSN1} = BQ:tx_rollback(Txn, BQSN), + BQSN1 + end, BQS, dict:fetch_keys(OT)), %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion @@ -445,8 +490,7 @@ promote_me(From, #state { q = Q, Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, SS), - + CPid, BQ, BQS1, GM, SS, OT), MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || @@ -516,7 +560,8 @@ stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, msg_seq_no = MsgSeqNo, - sender = ChPid }, + sender = ChPid, + txn = none }, EnqueueOnPromotion, State = #state { sender_queues = SQ, msg_id_status = MS }) -> @@ -553,7 +598,11 @@ maybe_enqueue_message( %% We've already heard from GM that the msg is to be %% discarded. We won't see this again. State #state { msg_id_status = dict:erase(MsgId, MS) } - end. + end; +maybe_enqueue_message(_Delivery, State) -> + %% In a txn. Txns are completely driven by gm for simplicity, so + %% we're not going to do anything here. + State. process_instruction( {publish, Deliver, ChPid, MsgProps, Msg = #basic_message { id = MsgId }}, diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 84987c88..7a3c17a2 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -22,7 +22,7 @@ requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/2, discard/3, + status/1, invoke/3, is_duplicate/3, discard/3, multiple_routing_keys/0]). -export([start/1, stop/0]). @@ -887,7 +887,7 @@ status(#vqstate { invoke(?MODULE, Fun, State) -> Fun(?MODULE, State). -is_duplicate(_Msg, State) -> {false, State}. +is_duplicate(_Txn, _Msg, State) -> {false, State}. discard(_Msg, _ChPid, State) -> State. -- cgit v1.2.1 From 6c81a81a454eb092f285bf824eaf5a655bc86c12 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 00:11:23 +0100 Subject: Well, getting closer. But it's not done yet, and I may have discovered a rather fatal problem with the whole idea of supporting txns in mirrors anyway in that because of the coalescing going on, there is absolutely no indication of when the BQ finally completes adding the msgs to the queue. 
Thus the only solution here might be to ban coalescing in this case --- src/rabbit_mirror_queue_master.erl | 16 +++-- src/rabbit_mirror_queue_slave.erl | 133 ++++++++++++++++++++++++++++++++----- 2 files changed, 126 insertions(+), 23 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 8714c44d..a59d64d4 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -239,7 +239,8 @@ tx_ack(Txn, AckTags, State = #state { gm = GM, fun (AckTag, Acc) -> [dict:fetch(AckTag, AM) | Acc] end, [], AckTags), ok = gm:broadcast(GM, {tx_ack, Txn, MsgIds}), - State + BQS1 = BQ:tx_ack(Txn, AckTags, BQS), + State #state { backing_queue_state = BQS1 } end. tx_rollback(Txn, State = #state { gm = GM, @@ -248,8 +249,8 @@ tx_rollback(Txn, State = #state { gm = GM, abandoned_txns = AbandonedTxns }) -> case sets:is_element(Txn, AbandonedTxns) of true -> {[], State}; - false -> ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), - {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), + false -> {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), + ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), {AckTags, State #state { backing_queue_state = BQS1 }} end. @@ -264,9 +265,14 @@ tx_commit(Txn, PostCommitFun, MsgPropsFun, %% try to commit on the old master. {[], State}; false -> - ok = gm:confirmed_broadcast(GM, {tx_commit, Txn, MsgPropsFun}), {AckTags, BQS1} = BQ:tx_commit(Txn, PostCommitFun, MsgPropsFun, BQS), - AM1 = lists:foldl(fun dict:erase/2, AM, AckTags), + {MsgIds, AM1} = lists:foldl( + fun (AckTag, {MsgIdsN, AMN}) -> + MsgId = dict:fetch(AckTag, AMN), + {[MsgId|MsgIdsN], dict:erase(AckTag, AMN)} + end, {[], AM}, AckTags), + ok = gm:confirmed_broadcast( + GM, {tx_commit, Txn, MsgPropsFun, MsgIds}), {AckTags, State #state { backing_queue_state = BQS, ack_msg_id = AM }} end. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 34ec5109..a61abbd7 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -163,22 +163,20 @@ handle_call({commit, Txn, ChPid}, From, State = #state { open_transactions = OT }) -> case dict:find(Txn, OT) of error -> - %% curious. We've not received _anything_ about this txn - %% so far via gm! + %% We've not received anything about this txn so far via + %% gm! OT1 = dict:store(Txn, {undefined, {committed, From}}, OT), noreply(State #state { open_transactions = OT1 }); - {ok, {committed, undefined}} -> - %% We've already finished via GM (our BQ has actually - %% replied back to us in the case of commit), so just - %% reply and tidy up. Note that because no one can every - %% consume from a slave, there are never going to be any - %% acks to return. - reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }); {ok, {open, undefined}} -> %% Save who we're from, but we're still waiting for the %% commit to arrive via GM OT1 = dict:store(Txn, {open, {committed, From}}, OT), noreply(State #state { open_transactions = OT1 }); + {ok, {committed, undefined}} -> + %% We've already finished via GM (our BQ has actually + %% replied back to us in the case of commit), so just + %% reply and tidy up. + reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }); {ok, {abandoned, undefined}} -> %% GM must have told us to roll back. reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }) @@ -224,7 +222,7 @@ handle_cast({rollback, Txn, ChPid}, %% {_, {'committed', From}} here case dict:find(Txn, OT) of error -> - %% odd. 
We've not received anything from GM about this. + %% We've not received anything from GM about this. OT1 = dict:store(Txn, {undefined, abandoned}, OT), noreply(State #state { open_transactions = OT1 }); {ok, {open, undefined}} -> @@ -292,6 +290,7 @@ prioritise_cast(Msg, _State) -> {run_backing_queue, _Mod, _Fun} -> 6; sync_timeout -> 6; {gm, _Msg} -> 5; + {post_commit, _Txn, _AckTags} -> 4; _ -> 0 end. @@ -340,6 +339,10 @@ bq_init(BQ, Q, Recover) -> end) end). +run_backing_queue(rabbit_mirror_queue_master, Fun, State) -> + %% Yes, this might look a little crazy, but see comments around + %% process_instruction({tx_commit,...}, State). + Fun(rabbit_mirror_queue_master, State); run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. @@ -412,12 +415,14 @@ promote_me(From, #state { q = Q, ok = gm:confirmed_broadcast(GM, heartbeat), %% Start by rolling back all open transactions + AbandonedTxns = [Txn || {Txn, {open, _TxnStatusByChannel}} + <- dict:to_list(OT)], BQS1 = lists:foldl( fun (Txn, BQSN) -> ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), {_AckTags, BQSN1} = BQ:tx_rollback(Txn, BQSN), BQSN1 - end, BQS, dict:fetch_keys(OT)), + end, BQS, AbandonedTxns), %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion @@ -490,7 +495,7 @@ promote_me(From, #state { q = Q, Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS1, GM, SS, OT), + CPid, BQ, BQS1, GM, SS, sets:from_list(AbandonedTxns)), MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || @@ -750,7 +755,7 @@ process_instruction({ack, MsgIds}, State = #state { backing_queue = BQ, backing_queue_state = BQS, msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), {MsgIds1, BQS1} = BQ:ack(AckTags, BQS), [] = MsgIds1 -- MsgIds, %% ASSERTION {ok, State #state { msg_id_ack = MA1, @@ -759,7 +764,7 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State = #state { backing_queue = BQ, backing_queue_state = BQS, msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), {ok, case length(AckTags) =:= length(MsgIds) of true -> {MsgIds, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), @@ -774,19 +779,111 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State #state { msg_id_ack = dict:new(), backing_queue_state = BQS2 } end}; +process_instruction({tx_publish, Txn, ChPid, MsgProps, Msg}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + open_transactions = OT }) -> + %% Will never see abandoned or committed in the LHS + OT1 = case dict:find(Txn, OT) of + error -> + dict:store(Txn, {open, undefined}, OT); + {ok, {open, _TxnStatusByChannel}} -> + OT + end, + BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, BQS), + {ok, State #state { backing_queue_state = BQS1, + open_transactions = OT1 }}; +process_instruction({tx_ack, Txn, MsgIds}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + open_transactions = OT, + msg_id_ack = MA }) -> + %% Will never see abandoned or committed in the LHS + OT1 = case dict:find(Txn, OT) of + error -> + dict:store(Txn, {open, undefined}, OT); + {ok, {open, _TxnStatusByChannel}} -> + OT + end, + %% Remember, rollback of a txn with acks simply undoes the ack - + %% the msg 
itself is not requeued or anything. Thus we make sure + %% msg_ids_to_acktags does not remove the entry from MQ, and we + %% will do the remove when we commit. + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, keep), + BQS1 = BQ:tx_ack(Txn, AckTags, BQS), + {ok, State #store { backing_queue_state = BQS1, + open_transactions = OT1, + msg_id_ack = MA1 }}; +process_instruction({tx_commit, Txn, MsgPropsFun, MsgIds}, + State = #state { backing_queue = BQ, + backing_queue_state = BQS, + open_transactions = OT, + msg_id_ack = MA }) -> + %% We must remove the ack tags from MQ at this point + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), + %% We won't adjust open_transactions until we get the post_commit + %% callback, unless we've already seen the commit from the channel + case dict:find(Txn, OT) of + {open, {committed, From}} -> + {AckTags1, BQS1} = + BQ:tx_commit(Txn, fun () -> gen_server2:reply(From, ok) end, + MsgPropsFun, BQS), + OT1 = dict:erase(Txn, OT), + true = lists:usort(AckTags) =:= lists:usort(AckTags1), %% ASSERTION + {ok, State #state { backing_queue_state = BQS, + open_transactions = OT1, + msg_id_ack = MA1 }}; + Status -> + %% We have to cope with the possibility that we'll get + %% promoted before the txn finishes, and rely on slight + %% magic if we do complete here. + Me = self(), + F = fun () -> rabbit_amqqueue:run_backing_queue_async( + Me, rabbit_mirror_queue_master, + fun (rabbit_mirror_queue_master, + State1 = #state { open_transactions = OT2 }) -> + OT3 = case dict:find(Txn, OT2) of + {committing, undefined} -> + dict:store( + Txn, {committed, undefined}, + OT2); + {committing, {committed, From}} -> + gen_server2:reply(From, ok), + dict:erase(Txn, OT2) + end, + State1 #state { open_transactions = OT3 } + end) + end, + {AckTags1, BQS1} = BQ:tx_commit(Txn, F, MsgPropsFun, BQS), + true = lists:usort(AckTags) =:= lists:usort(AckTags1), %% ASSERTION + OT1 = case Status of + error -> + dict:store(Txn, {committing, undefined}, OT); + {open, TxnStatusByChannel} -> + dict:store(Txn, {committing, TxnStatusByChannel}, OT) + end, + {ok, State #state { backing_queue_state = BQS, + open_transactions = OT1, + msg_id_ack = MA1 }}} + end; + process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:delete_and_terminate(BQS), {stop, State #state { backing_queue_state = undefined }}. -msg_ids_to_acktags(MsgIds, MA) -> +msg_ids_to_acktags(MsgIds, MA, RemoveOrKeep) -> {AckTags, MA1} = lists:foldl(fun (MsgId, {AckTagsN, MAN}) -> case dict:find(MsgId, MA) of - error -> {AckTagsN, MAN}; - {ok, AckTag} -> {[AckTag | AckTagsN], - dict:erase(MsgId, MAN)} + error -> + {AckTagsN, MAN}; + {ok, AckTag} when RemoveOrKeep =:= remove -> + {[AckTag | AckTagsN], + dict:erase(MsgId, MAN)}; + {ok, AckTag} when RemoveOrKeep =:= keep -> + {[AckTag | AckTagsN], MAN} end end, {[], MA}, MsgIds), {lists:reverse(AckTags), MA1}. -- cgit v1.2.1 From 34c34d59585af7ae7e51e30301765a5eadff7609 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 00:24:02 +0100 Subject: cosmetic --- src/rabbit_amqqueue.erl | 5 ++--- src/rabbit_binding.erl | 20 ++++++++------------ src/rabbit_exchange.erl | 2 +- 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e813d75c..77d3841b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -16,8 +16,7 @@ -module(rabbit_amqqueue). --export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, - purge/1]). 
+-export([start/0, stop/0, declare/5, delete_immediately/1, delete/3, purge/1]). -export([pseudo_queue/2]). -export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, @@ -58,7 +57,7 @@ -type(queue_or_not_found() :: rabbit_types:amqqueue() | 'not_found'). --spec(start/0 :: () -> [rabbit_amqqueue:name()]). +-spec(start/0 :: () -> [name()]). -spec(stop/0 :: () -> 'ok'). -spec(declare/5 :: (name(), boolean(), boolean(), diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b2d84143..8633ed13 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -152,18 +152,14 @@ add(Src, Dst, B) -> [] -> Durable = all_durable([Src, Dst]), case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> - ok = sync_binding(B, Durable, fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback(Src, add_bindings, - [Tx, Src, [B]]), - rabbit_event:notify_if(not Tx, binding_created, - info(B)) - end; - %% Binding exists, to queue on node which - %% is in the middle of starting - false -> - rabbit_misc:const(not_found) + true -> ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> + ok = rabbit_exchange:callback( + Src, add_bindings, [Tx, Src, [B]]), + rabbit_event:notify_if( + not Tx, binding_created, info(B)) + end; + false -> rabbit_misc:const(not_found) end; [_] -> fun rabbit_misc:const_ok/1 end. diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 2fe98e4b..623adf0b 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> [rabbit_exchange:name()]). +-spec(recover/0 :: () -> [name()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), -- cgit v1.2.1 From b95d56981fceafdb17edad3be1d7bb70e0fb2268 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 10:27:19 +0100 Subject: Sod it - transactions are too hard to do in mirror queues so don't bother. I know how to do it, but it's horrifically messy, and the margin is too small --- src/rabbit_mirror_queue_master.erl | 96 +++++-------------- src/rabbit_mirror_queue_slave.erl | 183 +++++-------------------------------- 2 files changed, 44 insertions(+), 235 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index a59d64d4..387dfbc4 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/6]). +-export([promote_backing_queue_state/5]). -behaviour(rabbit_backing_queue). @@ -39,8 +39,7 @@ set_delivered, seen_status, confirmed, - ack_msg_id, - abandoned_txns + ack_msg_id }). %% --------------------------------------------------------------------------- @@ -78,7 +77,7 @@ init(#amqqueue { arguments = Args, name = QName } = Q, Recover, confirmed = [], ack_msg_id = dict:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, AbandonedTxns) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, @@ -86,8 +85,7 @@ promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, AbandonedTxns) -> set_delivered = BQ:len(BQS), seen_status = SeenStatus, confirmed = [], - ack_msg_id = dict:new(), - abandoned_txns = AbandonedTxns }. + ack_msg_id = dict:new() }. 
terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but @@ -214,68 +212,20 @@ ack(AckTags, State = #state { gm = GM, {MsgIds, State #state { backing_queue_state = BQS1, ack_msg_id = AM1 }}. -tx_publish(Txn, Msg, MsgProps, ChPid, - State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - abandoned_txns = AbandonedTxns }) -> - case sets:is_element(Txn, AbandonedTxns) of - true -> State; - false -> ok = gm:broadcast(GM, {tx_publish, Txn, ChPid, MsgProps, Msg}), - BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, State), - State #state { backing_queue_state = BQS1 } - end. +tx_publish(_Txn, _Msg, _MsgProps, _ChPid, State) -> + %% We don't support txns in mirror queues + State. -tx_ack(Txn, AckTags, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - ack_msg_id = AM, - abandoned_txns = AbandonedTxns }) -> - case sets:is_element(Txn, AbandonedTxns) of - true -> - State; - false -> - MsgIds = lists:foldl( - fun (AckTag, Acc) -> [dict:fetch(AckTag, AM) | Acc] end, - [], AckTags), - ok = gm:broadcast(GM, {tx_ack, Txn, MsgIds}), - BQS1 = BQ:tx_ack(Txn, AckTags, BQS), - State #state { backing_queue_state = BQS1 } - end. +tx_ack(_Txn, _AckTags, State) -> + %% We don't support txns in mirror queues + State. -tx_rollback(Txn, State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - abandoned_txns = AbandonedTxns }) -> - case sets:is_element(Txn, AbandonedTxns) of - true -> {[], State}; - false -> {AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), - ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), - {AckTags, State #state { backing_queue_state = BQS1 }} - end. +tx_rollback(_Txn, State) -> + {[], State}. -tx_commit(Txn, PostCommitFun, MsgPropsFun, - State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS, - ack_msg_id = AM }) -> - case sets:is_element(Txn, AbandonedTxns) of - true -> - %% Don't worry - the channel will explode as it'll still - %% try to commit on the old master. - {[], State}; - false -> - {AckTags, BQS1} = BQ:tx_commit(Txn, PostCommitFun, MsgPropsFun, BQS), - {MsgIds, AM1} = lists:foldl( - fun (AckTag, {MsgIdsN, AMN}) -> - MsgId = dict:fetch(AckTag, AMN), - {[MsgId|MsgIdsN], dict:erase(AckTag, AMN)} - end, {[], AM}, AckTags), - ok = gm:confirmed_broadcast( - GM, {tx_commit, Txn, MsgPropsFun, MsgIds}), - {AckTags, State #state { backing_queue_state = BQS, - ack_msg_id = AM }} - end. +tx_commit(_Txn, PostCommitFun, _MsgPropsFun, State) -> + PostCommitFun(), %% Probably must run it to avoid deadlocks + {[], State}. requeue(AckTags, MsgPropsFun, State = #state { gm = GM, backing_queue = BQ, @@ -361,13 +311,10 @@ is_duplicate(none, Message = #basic_message { id = MsgId }, %% be called and we need to be able to detect this case {discarded, State} end; -is_duplicate(Txn, _Msg, State = #state { abandoned_txns = AbandonedTxns }) -> - %% There will be nothing in seen_status for any transactions that - %% are still in flight. - case sets:is_element(Txn, AbandonedTxns) of - true -> {published, State}; - false -> {false, State} - end. +is_duplicate(_Txn, _Msg, State) -> + %% In a transaction. We don't support txns in mirror queues. But + %% it's probably not a duplicate... + {false, State}. 
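
For clarity, a minimal sketch (not part of the patch) of what a channel-driven transaction now amounts to against the stubbed master callbacks above. The only obligation that survives is running the post-commit continuation, since the committing channel blocks on it; everything except the tx_* callback names and shapes is illustrative, and it assumes the behaviour callbacks are exported as usual for a rabbit_backing_queue implementation.

    %% Illustrative only: drive the stubbed transactional callbacks and
    %% show that the post-commit continuation still fires.
    demo_stubbed_txn(Txn, Msg, MsgProps, ChPid, State0) ->
        Self = self(),
        %% publishes and acks inside the txn are dropped on the floor
        State1 = rabbit_mirror_queue_master:tx_publish(
                   Txn, Msg, MsgProps, ChPid, State0),
        State2 = rabbit_mirror_queue_master:tx_ack(Txn, [], State1),
        %% tx_commit returns no ack tags but must invoke the
        %% continuation, otherwise the committing channel would hang
        {[], State3} = rabbit_mirror_queue_master:tx_commit(
                         Txn, fun () -> Self ! committed end,
                         fun (MP) -> MP end, State2),
        receive committed -> ok end,
        State3.
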
discard(Msg = #basic_message { id = MsgId }, ChPid, State = #state { gm = GM, @@ -376,13 +323,14 @@ discard(Msg = #basic_message { id = MsgId }, ChPid, seen_status = SS }) -> %% It's a massive error if we get told to discard something that's %% already been published or published-and-confirmed. To do that - %% would require non FIFO access... + %% would require non FIFO access. Hence we should not find + %% 'published' or 'confirmed' in this dict:find. case dict:find(MsgId, SS) of error -> ok = gm:broadcast(GM, {discard, ChPid, Msg}), State #state { backing_queue_state = BQ:discard(Msg, ChPid, BQS), seen_status = dict:erase(MsgId, SS) }; - discarded -> + {ok, discarded} -> State end. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index a61abbd7..8ca82fa1 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -55,8 +55,7 @@ sender_queues, %% :: Pid -> MsgQ msg_id_ack, %% :: MsgId -> AckTag - msg_id_status, - open_transactions + msg_id_status }). -define(SYNC_INTERVAL, 25). %% milliseconds @@ -106,8 +105,7 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), msg_id_ack = dict:new(), - msg_id_status = dict:new(), - open_transactions = dict:new() + msg_id_status = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -159,28 +157,9 @@ handle_call({gm_deaths, Deaths}, From, handle_call({run_backing_queue, Mod, Fun}, _From, State) -> reply(ok, run_backing_queue(Mod, Fun, State)); -handle_call({commit, Txn, ChPid}, From, - State = #state { open_transactions = OT }) -> - case dict:find(Txn, OT) of - error -> - %% We've not received anything about this txn so far via - %% gm! - OT1 = dict:store(Txn, {undefined, {committed, From}}, OT), - noreply(State #state { open_transactions = OT1 }); - {ok, {open, undefined}} -> - %% Save who we're from, but we're still waiting for the - %% commit to arrive via GM - OT1 = dict:store(Txn, {open, {committed, From}}, OT), - noreply(State #state { open_transactions = OT1 }); - {ok, {committed, undefined}} -> - %% We've already finished via GM (our BQ has actually - %% replied back to us in the case of commit), so just - %% reply and tidy up. - reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }); - {ok, {abandoned, undefined}} -> - %% GM must have told us to roll back. - reply(ok, State #state { open_transactions = dict:erase(Txn, OT) }) - end. +handle_call({commit, _Txn, _ChPid}, _From, State) -> + %% We don't support transactions in mirror queues + reply(ok, State). handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); @@ -216,23 +195,9 @@ handle_cast(sync_timeout, State) -> noreply(backing_queue_idle_timeout( State #state { sync_timer_ref = undefined })); -handle_cast({rollback, Txn, ChPid}, - State #state { open_transactions = OT }) -> - %% Will never see {'committed', _} or {_, 'abandoned'} or - %% {_, {'committed', From}} here - case dict:find(Txn, OT) of - error -> - %% We've not received anything from GM about this. - OT1 = dict:store(Txn, {undefined, abandoned}, OT), - noreply(State #state { open_transactions = OT1 }); - {ok, {open, undefined}} -> - %% The rollback is yet to arrive via GM. - OT1 = dict:store(Txn, {open, abandoned}, OT), - noreply(State #state { open_transactions = OT1 }); - {ok, {abandoned, undefined}} -> - %% GM has already rolled back. Tidy up. - noreply(State #state { open_transactions = dict:erase(Txn, OT) }) - end. 
+handle_cast({rollback, _Txn, _ChPid}, State) -> + %% We don't support transactions in mirror queues + noreply(State). handle_info(timeout, State) -> noreply(backing_queue_idle_timeout(State)); @@ -405,8 +370,7 @@ promote_me(From, #state { q = Q, rate_timer_ref = RateTRef, sender_queues = SQ, msg_id_ack = MA, - msg_id_status = MS, - open_transactions = OT }) -> + msg_id_status = MS }) -> rabbit_log:info("Promoting slave ~p for ~s~n", [self(), rabbit_misc:rs(Q #amqqueue.name)]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), @@ -414,16 +378,6 @@ promote_me(From, #state { q = Q, gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), - %% Start by rolling back all open transactions - AbandonedTxns = [Txn || {Txn, {open, _TxnStatusByChannel}} - <- dict:to_list(OT)], - BQS1 = lists:foldl( - fun (Txn, BQSN) -> - ok = gm:confirmed_broadcast(GM, {tx_rollback, Txn}), - {_AckTags, BQSN1} = BQ:tx_rollback(Txn, BQSN), - BQSN1 - end, BQS, AbandonedTxns), - %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion %% then we pass them to the @@ -495,7 +449,7 @@ promote_me(From, #state { q = Q, Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS1, GM, SS, sets:from_list(AbandonedTxns)), + CPid, BQ, BQS, GM, SS), MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || @@ -604,9 +558,8 @@ maybe_enqueue_message( %% discarded. We won't see this again. State #state { msg_id_status = dict:erase(MsgId, MS) } end; -maybe_enqueue_message(_Delivery, State) -> - %% In a txn. Txns are completely driven by gm for simplicity, so - %% we're not going to do anything here. +maybe_enqueue_message(_Delivery, _EnqueueOnPromotion, State) -> + %% We don't support txns in mirror queues. State. 
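
A hedged sketch of the channel-facing consequence of the simplified commit and rollback clauses above: transactions against a slave-backed queue now degenerate into a cheap no-op round trip. Only the {commit, ...} call and {rollback, ...} cast shapes come from the patch; the wrapper function below is illustrative.

    %% Illustrative only: what a caller observes against the simplified
    %% slave. The commit call returns ok immediately and the rollback
    %% cast is simply ignored by the slave.
    demo_txn_against_slave(SlavePid, Txn, ChPid) ->
        ok = gen_server2:call(SlavePid, {commit, Txn, ChPid}, infinity),
        ok = gen_server2:cast(SlavePid, {rollback, Txn, ChPid}).
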
process_instruction( @@ -755,7 +708,7 @@ process_instruction({ack, MsgIds}, State = #state { backing_queue = BQ, backing_queue_state = BQS, msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), {MsgIds1, BQS1} = BQ:ack(AckTags, BQS), [] = MsgIds1 -- MsgIds, %% ASSERTION {ok, State #state { msg_id_ack = MA1, @@ -764,7 +717,7 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State = #state { backing_queue = BQ, backing_queue_state = BQS, msg_id_ack = MA }) -> - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), + {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA), {ok, case length(AckTags) =:= length(MsgIds) of true -> {MsgIds, BQS1} = BQ:requeue(AckTags, MsgPropsFun, BQS), @@ -779,113 +732,21 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State #state { msg_id_ack = dict:new(), backing_queue_state = BQS2 } end}; -process_instruction({tx_publish, Txn, ChPid, MsgProps, Msg}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS, - open_transactions = OT }) -> - %% Will never see abandoned or committed in the LHS - OT1 = case dict:find(Txn, OT) of - error -> - dict:store(Txn, {open, undefined}, OT); - {ok, {open, _TxnStatusByChannel}} -> - OT - end, - BQS1 = BQ:tx_publish(Txn, Msg, MsgProps, ChPid, BQS), - {ok, State #state { backing_queue_state = BQS1, - open_transactions = OT1 }}; -process_instruction({tx_ack, Txn, MsgIds}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS, - open_transactions = OT, - msg_id_ack = MA }) -> - %% Will never see abandoned or committed in the LHS - OT1 = case dict:find(Txn, OT) of - error -> - dict:store(Txn, {open, undefined}, OT); - {ok, {open, _TxnStatusByChannel}} -> - OT - end, - %% Remember, rollback of a txn with acks simply undoes the ack - - %% the msg itself is not requeued or anything. Thus we make sure - %% msg_ids_to_acktags does not remove the entry from MQ, and we - %% will do the remove when we commit. - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, keep), - BQS1 = BQ:tx_ack(Txn, AckTags, BQS), - {ok, State #store { backing_queue_state = BQS1, - open_transactions = OT1, - msg_id_ack = MA1 }}; -process_instruction({tx_commit, Txn, MsgPropsFun, MsgIds}, - State = #state { backing_queue = BQ, - backing_queue_state = BQS, - open_transactions = OT, - msg_id_ack = MA }) -> - %% We must remove the ack tags from MQ at this point - {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA, remove), - %% We won't adjust open_transactions until we get the post_commit - %% callback, unless we've already seen the commit from the channel - case dict:find(Txn, OT) of - {open, {committed, From}} -> - {AckTags1, BQS1} = - BQ:tx_commit(Txn, fun () -> gen_server2:reply(From, ok) end, - MsgPropsFun, BQS), - OT1 = dict:erase(Txn, OT), - true = lists:usort(AckTags) =:= lists:usort(AckTags1), %% ASSERTION - {ok, State #state { backing_queue_state = BQS, - open_transactions = OT1, - msg_id_ack = MA1 }}; - Status -> - %% We have to cope with the possibility that we'll get - %% promoted before the txn finishes, and rely on slight - %% magic if we do complete here. 
- Me = self(), - F = fun () -> rabbit_amqqueue:run_backing_queue_async( - Me, rabbit_mirror_queue_master, - fun (rabbit_mirror_queue_master, - State1 = #state { open_transactions = OT2 }) -> - OT3 = case dict:find(Txn, OT2) of - {committing, undefined} -> - dict:store( - Txn, {committed, undefined}, - OT2); - {committing, {committed, From}} -> - gen_server2:reply(From, ok), - dict:erase(Txn, OT2) - end, - State1 #state { open_transactions = OT3 } - end) - end, - {AckTags1, BQS1} = BQ:tx_commit(Txn, F, MsgPropsFun, BQS), - true = lists:usort(AckTags) =:= lists:usort(AckTags1), %% ASSERTION - OT1 = case Status of - error -> - dict:store(Txn, {committing, undefined}, OT); - {open, TxnStatusByChannel} -> - dict:store(Txn, {committing, TxnStatusByChannel}, OT) - end, - {ok, State #state { backing_queue_state = BQS, - open_transactions = OT1, - msg_id_ack = MA1 }}} - end; - process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:delete_and_terminate(BQS), {stop, State #state { backing_queue_state = undefined }}. -msg_ids_to_acktags(MsgIds, MA, RemoveOrKeep) -> +msg_ids_to_acktags(MsgIds, MA) -> {AckTags, MA1} = - lists:foldl(fun (MsgId, {AckTagsN, MAN}) -> - case dict:find(MsgId, MA) of - error -> - {AckTagsN, MAN}; - {ok, AckTag} when RemoveOrKeep =:= remove -> - {[AckTag | AckTagsN], - dict:erase(MsgId, MAN)}; - {ok, AckTag} when RemoveOrKeep =:= keep -> - {[AckTag | AckTagsN], MAN} - end - end, {[], MA}, MsgIds), + lists:foldl( + fun (MsgId, {Acc, MAN}) -> + case dict:find(MsgId, MA) of + error -> {Acc, MAN}; + {ok, AckTag} -> {[AckTag | Acc], dict:erase(MsgId, MAN)} + end + end, {[], MA}, MsgIds), {lists:reverse(AckTags), MA1}. ack_all(BQ, MA, BQS) -> -- cgit v1.2.1 From 3e13f9831edcd4ffc35925b8230b8e4e3b5fd3eb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 11:06:22 +0100 Subject: misc:table_map -> misc:table_filter. --- src/rabbit_binding.erl | 25 ++++++++++--------------- src/rabbit_exchange.erl | 19 +++++++++---------- src/rabbit_misc.erl | 31 ++++++++++++++++--------------- 3 files changed, 35 insertions(+), 40 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 8633ed13..ec64c474 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,29 +97,24 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - rabbit_misc:table_map( - fun (Route = #route{binding = B = - #binding{destination = Dst = - #resource{kind = Kind}}}) -> + rabbit_misc:table_filter( + fun (#route{binding = B = #binding{destination = Dst = + #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - case mnesia:read({rabbit_durable_route, B}) =/= [] andalso + mnesia:read({rabbit_durable_route, B}) =/= [] andalso sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet - end) of - true -> ok = sync_transient_binding( - Route, fun mnesia:write/3), - B; - false -> none - end + end) end, - fun (none, _Tx) -> - none; - (B = #binding{source = Src}, Tx) -> + fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), - B + case Tx of + true -> ok = sync_transient_binding(R, fun mnesia:write/3); + false -> ok + end end, rabbit_durable_route), ok. 
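
The recover/2 rewrite above is the first caller of rabbit_misc:table_filter/3, whose definition follows in the rabbit_misc.erl hunk below: the predicate runs inside an mnesia transaction per row, and the second fun is invoked both inside the transaction (Tx = true) and again after it commits (Tx = false). A minimal usage sketch, with an assumed record and assumed table names chosen purely for illustration:

    %% Sketch only: keep rows of an assumed mnesia table `my_durable_thing'
    %% whose counterpart is missing from `my_thing', writing each survivor
    %% back inside the transaction and notifying afterwards.
    -record(thing, {name, value}).

    recover_things_sketch() ->
        rabbit_misc:table_filter(
          fun (#thing{name = Name}) ->
                  mnesia:read({my_thing, Name}) =:= []
          end,
          fun (Thing, Tx) ->
                  case Tx of
                      true  -> ok = mnesia:write(my_thing, Thing, write);
                      false -> io:format("recovered ~p~n", [Thing])
                  end
          end,
          my_durable_thing).
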
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 623adf0b..3e4edba4 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -83,17 +83,16 @@ -define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments]). recover() -> - Xs = rabbit_misc:table_map( - fun (X = #exchange{name = XName}) -> - case mnesia:read({rabbit_exchange, XName}) of - [] -> ok = mnesia:write(rabbit_exchange, X, write), - X; - [_] -> none - end + Xs = rabbit_misc:table_filter( + fun (#exchange{name = XName}) -> + mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (none, _Tx) -> none; - (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), - X + fun (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), + case Tx of + true -> ok = mnesia:write(rabbit_exchange, + X, write); + false -> ok + end end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 6bebf005..adc3ae66 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -38,7 +38,7 @@ -export([ensure_ok/2]). -export([makenode/1, nodeparts/1, cookie_hash/0, tcp_name/3]). -export([upmap/2, map_in_order/2]). --export([table_map/3]). +-export([table_filter/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2]). -export([append_file/2, ensure_parent_dirs_exist/1]). @@ -146,7 +146,8 @@ -> atom()). -spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]). -spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]). --spec(table_map/3 :: (fun ((A) -> A), fun ((A, boolean()) -> A), atom()) -> A). +-spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'), + atom()) -> [A]). -spec(dirty_read_all/1 :: (atom()) -> [any()]). -spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom()) -> 'ok' | 'aborted'). @@ -461,24 +462,24 @@ map_in_order(F, L) -> lists:reverse( lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)). -%% Fold over each entry in a table, executing the cons function in a +%% Fold over each entry in a table, executing the pre-post-commit function in a %% transaction. This is often far more efficient than wrapping a tx %% around the lot. %% %% We ignore entries that have been modified or removed. -table_map(Fun, PrePostCommitFun, TableName) -> +table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> case execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> Acc; - _ -> Fun(E) - end - end, - PrePostCommitFun) of - none -> Acc; - Res -> [Res | Acc] - end + fun (E, Acc) -> execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> false; + _ -> Pred(E) + end + end, + fun (false, _Tx) -> Acc; + (true, Tx) -> PrePostCommitFun(E, Tx), + [E | Acc] + end) end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From bb80be93ece35590dd9ee1295a866b1d88583ade Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 11:15:57 +0100 Subject: some notes --- src/rabbit_mirror_queue_master.erl | 40 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 387dfbc4..664c706d 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -42,6 +42,46 @@ ack_msg_id }). +%% Some notes on transactions +%% +%% We don't support transactions on mirror queues. To do so is +%% challenging. 
The underlying bq is free to add the contents of the +%% txn to the queue proper at any point after the tx.commit comes in +%% but before the tx.commit-ok goes out. This means that it is not +%% safe for all mirrors to simply issue the BQ:tx_commit at the same +%% time, as the addition of the txn's contents to the queue may +%% subsequently be inconsistently interwoven with other actions on the +%% BQ. The solution to this is, in the master, wrap the PostCommitFun +%% and do the gm:broadcast in there: at that point, you're in the BQ +%% (well, there's actually nothing to stop that function being invoked +%% by some other process, but let's pretend for now: you could always +%% use run_backing_queue_async to ensure you really are in the queue +%% process), the gm:broadcast is safe because you don't have to worry +%% about races with other gm:broadcast calls (same process). Thus this +%% signal would indicate sufficiently to all the slaves that they must +%% insert the complete contents of the txn at precisely this point in +%% the stream of events. +%% +%% However, it's quite difficult for the slaves to make that happen: +%% they would be forced to issue the tx_commit at that point, but then +%% stall processing any further instructions from gm until they +%% receive the notification from their bq that the tx_commit has fully +%% completed (i.e. they need to treat what is an async system as being +%% fully synchronous). This is not too bad (apart from the +%% vomit-inducing notion of it all): just need a queue of instructions +%% from the GM; but then it gets rather worse when you consider what +%% needs to happen if the master dies at this point and the slave in +%% the middle of this tx_commit needs to be promoted. +%% +%% Finally, we can't possibly hope to make transactions atomic across +%% mirror queues, and it's not even clear that that's desirable: if a +%% slave fails whilst there's an open transaction in progress then +%% when the channel comes to commit the txn, it will detect the +%% failure and destroy the channel. However, the txn will have +%% actually committed successfully in all the other mirrors (including +%% master). To do this bit properly would require 2PC and all the +%% baggage that goes with that. + %% --------------------------------------------------------------------------- %% Backing queue %% --------------------------------------------------------------------------- -- cgit v1.2.1 From 1656e6f9a5083f56ad69082b55dd72f6fddf8a7c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 11:41:08 +0100 Subject: Callbacks should come after mnesia writes. --- src/rabbit_binding.erl | 4 ++-- src/rabbit_exchange.erl | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index ec64c474..611f7909 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -110,11 +110,11 @@ recover(XNames, QNames) -> end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]), case Tx of true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok - end + end, + rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) end, rabbit_durable_route), ok. 
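
To make the rejected design described in the transaction notes added to rabbit_mirror_queue_master.erl above (the "some notes" commit) concrete, here is a hedged sketch of the "broadcast the commit point from inside the BQ's post-commit continuation" idea. It uses the master's #state record fields from earlier in this series, the marker term is invented, and it is deliberately not what the patches implement.

    %% Hypothetical tx_commit for the master, per the note: the commit
    %% marker is broadcast from inside the BQ's post-commit continuation
    %% so every slave would see it at the same point in the event stream.
    tx_commit_sketch(Txn, PostCommitFun, MsgPropsFun,
                     State = #state { gm                  = GM,
                                      backing_queue       = BQ,
                                      backing_queue_state = BQS }) ->
        F = fun () ->
                    ok = gm:broadcast(GM, {tx_commit_point, Txn}),
                    PostCommitFun()
            end,
        {AckTags, BQS1} = BQ:tx_commit(Txn, F, MsgPropsFun, BQS),
        {AckTags, State #state { backing_queue_state = BQS1 }}.
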
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 3e4edba4..a74f9d28 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,12 +87,12 @@ recover() -> fun (#exchange{name = XName}) -> mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (X, Tx) -> rabbit_exchange:callback(X, create, [Tx, X]), - case Tx of + fun (X, Tx) -> case Tx of true -> ok = mnesia:write(rabbit_exchange, X, write); false -> ok - end + end, + rabbit_exchange:callback(X, create, [Tx, X]) end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. -- cgit v1.2.1 From 1392afc76e534fd9cfbfd716c57777ff50c8b76e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 12:03:42 +0100 Subject: Extensions to BQ --- include/rabbit_backing_queue_spec.hrl | 31 ++++--- src/rabbit_amqqueue.erl | 20 +++-- src/rabbit_amqqueue_process.erl | 160 +++++++++++++++++++++------------- src/rabbit_backing_queue.erl | 37 +++++--- src/rabbit_tests.erl | 33 ++++--- src/rabbit_variable_queue.erl | 133 ++++++++++++++++------------ 6 files changed, 249 insertions(+), 165 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index b2bf6bbb..d9296bf6 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -25,23 +25,24 @@ -type(message_properties_transformer() :: fun ((rabbit_types:message_properties()) -> rabbit_types:message_properties())). --type(async_callback() :: fun ((fun ((state()) -> state())) -> 'ok')). --type(sync_callback() :: fun ((fun ((state()) -> state())) -> 'ok' | 'error')). +-type(async_callback() :: fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')). +-type(sync_callback() :: fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok' | 'error')). -spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(init/5 :: (rabbit_amqqueue:name(), is_durable(), attempt_recovery(), +-spec(init/4 :: (rabbit_types:amqqueue(), attempt_recovery(), async_callback(), sync_callback()) -> state()). -spec(terminate/1 :: (state()) -> state()). -spec(delete_and_terminate/1 :: (state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). --spec(publish/3 :: (rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) -> state()). --spec(publish_delivered/4 :: (true, rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) +-spec(publish/4 :: (rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) -> + state()). +-spec(publish_delivered/5 :: (true, rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) -> {ack(), state()}; (false, rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) + rabbit_types:message_properties(), pid(), state()) -> {undefined, state()}). -spec(drain_confirmed/1 :: (state()) -> {[rabbit_guid:guid()], state()}). -spec(dropwhile/2 :: @@ -49,16 +50,17 @@ -> state()). -spec(fetch/2 :: (true, state()) -> {fetch_result(ack()), state()}; (false, state()) -> {fetch_result(undefined), state()}). --spec(ack/2 :: ([ack()], state()) -> state()). --spec(tx_publish/4 :: (rabbit_types:txn(), rabbit_types:basic_message(), - rabbit_types:message_properties(), state()) -> state()). +-spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). +-spec(tx_publish/5 :: (rabbit_types:txn(), rabbit_types:basic_message(), + rabbit_types:message_properties(), pid(), state()) -> + state()). 
-spec(tx_ack/3 :: (rabbit_types:txn(), [ack()], state()) -> state()). -spec(tx_rollback/2 :: (rabbit_types:txn(), state()) -> {[ack()], state()}). -spec(tx_commit/4 :: (rabbit_types:txn(), fun (() -> any()), message_properties_transformer(), state()) -> {[ack()], state()}). -spec(requeue/3 :: ([ack()], message_properties_transformer(), state()) - -> state()). + -> {[rabbit_guid:guid()], state()}). -spec(len/1 :: (state()) -> non_neg_integer()). -spec(is_empty/1 :: (state()) -> boolean()). -spec(set_ram_duration_target/2 :: @@ -68,3 +70,8 @@ -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). +-spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). +-spec(is_duplicate/3 :: + (rabbit_types:txn(), rabbit_types:basic_message(), state()) -> + {'false'|'published'|'discarded', state()}). +-spec(discard/3 :: (rabbit_types:basic_message(), pid(), state()) -> state()). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index c7391965..804edc81 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -30,7 +30,7 @@ %% internal -export([internal_declare/2, internal_delete/1, - run_backing_queue/2, run_backing_queue_async/2, + run_backing_queue/3, run_backing_queue_async/3, sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1, emit_stats/1]). @@ -141,10 +141,12 @@ rabbit_types:connection_exit() | fun ((boolean()) -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). --spec(run_backing_queue/2 :: - (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). --spec(run_backing_queue_async/2 :: - (pid(), (fun ((A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). +-spec(run_backing_queue/3 :: + (pid(), atom(), + (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). +-spec(run_backing_queue_async/3 :: + (pid(), atom(), + (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). @@ -439,11 +441,11 @@ internal_delete(QueueName) -> end end). -run_backing_queue(QPid, Fun) -> - gen_server2:call(QPid, {run_backing_queue, Fun}, infinity). +run_backing_queue(QPid, Mod, Fun) -> + gen_server2:call(QPid, {run_backing_queue, Mod, Fun}, infinity). -run_backing_queue_async(QPid, Fun) -> - gen_server2:cast(QPid, {run_backing_queue, Fun}). +run_backing_queue_async(QPid, Mod, Fun) -> + gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). 
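
The widened run_backing_queue API above threads a module name through to the queue process so the work can be dispatched via the backing queue's new invoke/3 callback, added later in this patch. A brief sketch of the calling pattern, with rabbit_variable_queue chosen purely as an example target module:

    %% Sketch: hand a continuation back to the queue process, tagged with
    %% the module that should receive it. The fun's first argument is the
    %% same module name, as required by the new callback shape.
    post_work_sketch(QPid) ->
        rabbit_amqqueue:run_backing_queue_async(
          QPid, rabbit_variable_queue,
          fun (rabbit_variable_queue, BQS) ->
                  %% touch the backing queue state here; returning it
                  %% unchanged is the no-op case
                  BQS
          end).
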
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 2b0fe17e..110817a9 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -137,8 +137,7 @@ code_change(_OldVsn, State, _Extra) -> %%---------------------------------------------------------------------------- declare(Recover, From, - State = #q{q = Q = #amqqueue{name = QName, durable = IsDurable}, - backing_queue = BQ, backing_queue_state = undefined, + State = #q{q = Q, backing_queue = BQ, backing_queue_state = undefined, stats_timer = StatsTimer}) -> case rabbit_amqqueue:internal_declare(Q, Recover) of not_found -> {stop, normal, not_found, State}; @@ -149,7 +148,7 @@ declare(Recover, From, ok = rabbit_memory_monitor:register( self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}), - BQS = bq_init(BQ, QName, IsDurable, Recover), + BQS = bq_init(BQ, Q, Recover), State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), @@ -159,17 +158,17 @@ declare(Recover, From, Q1 -> {stop, normal, {existing, Q1}, State} end. -bq_init(BQ, QName, IsDurable, Recover) -> +bq_init(BQ, Q, Recover) -> Self = self(), - BQ:init(QName, IsDurable, Recover, - fun (Fun) -> - rabbit_amqqueue:run_backing_queue_async(Self, Fun) + BQ:init(Q, Recover, + fun (Mod, Fun) -> + rabbit_amqqueue:run_backing_queue_async(Self, Mod, Fun) end, - fun (Fun) -> + fun (Mod, Fun) -> rabbit_misc:with_exit_handler( fun () -> error end, fun () -> - rabbit_amqqueue:run_backing_queue(Self, Fun) + rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) end) end). @@ -477,45 +476,70 @@ run_message_queue(State) -> {_IsEmpty1, State2} = deliver_msgs_to_consumers(Funs, IsEmpty, State1), State2. -attempt_delivery(#delivery{txn = none, - sender = ChPid, - message = Message, - msg_seq_no = MsgSeqNo} = Delivery, - State = #q{backing_queue = BQ}) -> +attempt_delivery(Delivery = #delivery{txn = none, + sender = ChPid, + message = Message, + msg_seq_no = MsgSeqNo}, + State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> Confirm = should_confirm_message(Delivery, State), case Confirm of immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, - PredFun = fun (IsEmpty, _State) -> not IsEmpty end, - DeliverFun = - fun (AckRequired, false, State1 = #q{backing_queue_state = BQS}) -> - %% we don't need an expiry here because messages are - %% not being enqueued, so we use an empty - %% message_properties. - {AckTag, BQS1} = - BQ:publish_delivered( - AckRequired, Message, - (?BASE_MESSAGE_PROPERTIES)#message_properties{ - needs_confirming = needs_confirming(Confirm)}, - BQS), - {{Message, false, AckTag}, true, - State1#q{backing_queue_state = BQS1}} - end, - {Delivered, State1} = - deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, State), - {Delivered, Confirm, State1}; -attempt_delivery(#delivery{txn = Txn, - sender = ChPid, - message = Message} = Delivery, - State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - store_ch_record((ch_record(ChPid))#cr{txn = Txn}), - BQS1 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, BQS), - {true, should_confirm_message(Delivery, State), - State#q{backing_queue_state = BQS1}}. 
+ case BQ:is_duplicate(none, Message, BQS) of + {false, BQS1} -> + PredFun = fun (IsEmpty, _State) -> not IsEmpty end, + DeliverFun = + fun (AckRequired, false, + State1 = #q{backing_queue_state = BQS2}) -> + %% we don't need an expiry here because + %% messages are not being enqueued, so we use + %% an empty message_properties. + {AckTag, BQS3} = + BQ:publish_delivered( + AckRequired, Message, + (?BASE_MESSAGE_PROPERTIES)#message_properties{ + needs_confirming = needs_confirming(Confirm)}, + ChPid, BQS2), + {{Message, false, AckTag}, true, + State1#q{backing_queue_state = BQS3}} + end, + {Delivered, State2} = + deliver_msgs_to_consumers({ PredFun, DeliverFun }, false, + State#q{backing_queue_state = BQS1}), + {Delivered, Confirm, State2}; + {Duplicate, BQS1} -> + %% if the message has previously been seen by the BQ then + %% it must have been seen under the same circumstances as + %% now: i.e. if it is now a deliver_immediately then it + %% must have been before. + Delivered = case Duplicate of + published -> true; + discarded -> false + end, + {Delivered, Confirm, State#q{backing_queue_state = BQS1}} + end; +attempt_delivery(Delivery = #delivery{txn = Txn, + sender = ChPid, + message = Message}, + State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> + Confirm = should_confirm_message(Delivery, State), + case BQ:is_duplicate(Txn, Message, BQS) of + {false, BQS1} -> + store_ch_record((ch_record(ChPid))#cr{txn = Txn}), + BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, + BQS1), + {true, Confirm, State#q{backing_queue_state = BQS2}}; + {Duplicate, BQS1} -> + Delivered = case Duplicate of + published -> true; + discarded -> false + end, + {Delivered, Confirm, State#q{backing_queue_state = BQS1}} + end. -deliver_or_enqueue(Delivery = #delivery{message = Message}, State) -> +deliver_or_enqueue(Delivery = #delivery{message = Message, + sender = ChPid}, State) -> {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), State2 = #q{backing_queue = BQ, backing_queue_state = BQS} = maybe_record_confirm_message(Confirm, State1), @@ -525,14 +549,17 @@ deliver_or_enqueue(Delivery = #delivery{message = Message}, State) -> BQ:publish(Message, (message_properties(State)) #message_properties{ needs_confirming = needs_confirming(Confirm)}, - BQS), + ChPid, BQS), ensure_ttl_timer(State2#q{backing_queue_state = BQS1}) end. requeue_and_run(AckTags, State = #q{backing_queue = BQ, ttl=TTL}) -> run_backing_queue( - fun (BQS) -> BQ:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS) end, - State). + BQ, fun (M, BQS) -> + {_MsgIds, BQS1} = + M:requeue(AckTags, reset_msg_expiry_fun(TTL), BQS), + BQS1 + end, State). fetch(AckRequired, State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> @@ -635,10 +662,11 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - run_backing_queue(fun (BQS) -> BQ:idle_timeout(BQS) end, State). + run_backing_queue(BQ, fun (M, BQS) -> M:idle_timeout(BQS) end, State). -run_backing_queue(Fun, State = #q{backing_queue_state = BQS}) -> - run_message_queue(State#q{backing_queue_state = Fun(BQS)}). +run_backing_queue(Mod, Fun, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + run_message_queue(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)}). 
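
For a backing queue that has no notion of duplicates and only ever invokes functions aimed at itself, the three new callbacks the queue process now relies on can be satisfied trivially. A hedged template follows; rabbit_variable_queue's own versions, which take this same shape, appear later in this patch.

    %% Minimal implementations for a backing queue that never reports
    %% duplicates and only dispatches invocations addressed to itself.
    invoke(?MODULE, Fun, State) -> Fun(?MODULE, State).

    is_duplicate(_Txn, _Msg, State) -> {false, State}.

    discard(_Msg, _ChPid, State) -> State.
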
commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, State = #q{backing_queue = BQ, @@ -662,6 +690,12 @@ rollback_transaction(Txn, C, State = #q{backing_queue = BQ, subtract_acks(A, B) when is_list(B) -> lists:foldl(fun sets:del_element/2, A, B). +discard_delivery(#delivery{sender = ChPid, + message = Message}, + State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + State#q{backing_queue_state = BQ:discard(Message, ChPid, BQS)}. + reset_msg_expiry_fun(TTL) -> fun(MsgProps) -> MsgProps#message_properties{expiry = calculate_msg_expiry(TTL)} @@ -768,11 +802,11 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {run_backing_queue, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Mod, _Fun} -> 6; + _ -> 0 end. prioritise_cast(Msg, _State) -> @@ -788,7 +822,7 @@ prioritise_cast(Msg, _State) -> {reject, _AckTags, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; {unblock, _ChPid} -> 7; - {run_backing_queue, _Fun} -> 6; + {run_backing_queue, _Mod, _Fun} -> 6; sync_timeout -> 6; _ -> 0 end. @@ -807,14 +841,14 @@ handle_call({init, Recover}, From, true -> erlang:monitor(process, Owner), declare(Recover, From, State); false -> #q{backing_queue = BQ, backing_queue_state = undefined, - q = #amqqueue{name = QName, durable = IsDurable}} = State, + q = #amqqueue{name = QName} = Q} = State, gen_server2:reply(From, not_found), case Recover of true -> ok; _ -> rabbit_log:warning( "Queue ~p exclusive owner went away~n", [QName]) end, - BQS = bq_init(BQ, QName, IsDurable, Recover), + BQS = bq_init(BQ, Q, Recover), %% Rely on terminate to delete the queue. {stop, normal, State#q{backing_queue_state = BQS}} end; @@ -848,7 +882,7 @@ handle_call({deliver_immediately, Delivery}, _From, State) -> {Delivered, Confirm, State1} = attempt_delivery(Delivery, State), reply(Delivered, case Delivered of true -> maybe_record_confirm_message(Confirm, State1); - false -> State1 + false -> discard_delivery(Delivery, State1) end); handle_call({deliver, Delivery}, From, State) -> @@ -1004,12 +1038,12 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({run_backing_queue, Fun}, _From, State) -> - reply(ok, run_backing_queue(Fun, State)). +handle_call({run_backing_queue, Mod, Fun}, _From, State) -> + reply(ok, run_backing_queue(Mod, Fun, State)). 
-handle_cast({run_backing_queue, Fun}, State) -> - noreply(run_backing_queue(Fun, State)); +handle_cast({run_backing_queue, Mod, Fun}, State) -> + noreply(run_backing_queue(Mod, Fun, State)); handle_cast(sync_timeout, State) -> noreply(backing_queue_idle_timeout(State#q{sync_timer_ref = undefined})); @@ -1028,7 +1062,7 @@ handle_cast({ack, Txn, AckTags, ChPid}, case Txn of none -> ChAckTags1 = subtract_acks(ChAckTags, AckTags), NewC = C#cr{acktags = ChAckTags1}, - BQS1 = BQ:ack(AckTags, BQS), + {_Guids, BQS1} = BQ:ack(AckTags, BQS), {NewC, State#q{backing_queue_state = BQS1}}; _ -> BQS1 = BQ:tx_ack(Txn, AckTags, BQS), {C#cr{txn = Txn}, @@ -1049,7 +1083,7 @@ handle_cast({reject, AckTags, Requeue, ChPid}, maybe_store_ch_record(C#cr{acktags = ChAckTags1}), noreply(case Requeue of true -> requeue_and_run(AckTags, State); - false -> BQS1 = BQ:ack(AckTags, BQS), + false -> {_Guids, BQS1} = BQ:ack(AckTags, BQS), State#q{backing_queue_state = BQS1} end) end; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 0ca8d260..0955a080 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -35,19 +35,18 @@ behaviour_info(callbacks) -> %% Initialise the backing queue and its state. %% %% Takes - %% 1. the queue name - %% 2. a boolean indicating whether the queue is durable - %% 3. a boolean indicating whether the queue is an existing queue + %% 1. the amqqueue record + %% 2. a boolean indicating whether the queue is an existing queue %% that should be recovered - %% 4. an asynchronous callback which accepts a function of type + %% 3. an asynchronous callback which accepts a function of type %% backing-queue-state to backing-queue-state. This callback %% function can be safely invoked from any process, which %% makes it useful for passing messages back into the backing %% queue, especially as the backing queue does not have %% control of its own mailbox. - %% 5. a synchronous callback. Same as the asynchronous callback + %% 4. a synchronous callback. Same as the asynchronous callback %% but waits for completion and returns 'error' on error. - {init, 5}, + {init, 4}, %% Called on queue shutdown when queue isn't being deleted. {terminate, 1}, @@ -61,12 +60,12 @@ behaviour_info(callbacks) -> {purge, 1}, %% Publish a message. - {publish, 3}, + {publish, 4}, %% Called for messages which have already been passed straight %% out to a client. The queue will be empty for these calls %% (i.e. saves the round trip through the backing queue). - {publish_delivered, 4}, + {publish_delivered, 5}, %% Return ids of messages which have been confirmed since %% the last invocation of this function (or initialisation). @@ -109,7 +108,7 @@ behaviour_info(callbacks) -> {ack, 2}, %% A publish, but in the context of a transaction. - {tx_publish, 4}, + {tx_publish, 5}, %% Acks, but in the context of a transaction. {tx_ack, 3}, @@ -165,7 +164,25 @@ behaviour_info(callbacks) -> %% Exists for debugging purposes, to be able to expose state via %% rabbitmqctl list_queues backing_queue_status - {status, 1} + {status, 1}, + + %% Passed a function to be invoked with the relevant backing + %% queue's state. Useful for when the backing queue or other + %% components need to pass functions into the backing queue. + {invoke, 3}, + + %% Called prior to a publish or publish_delivered call. Allows + %% the BQ to signal that it's already seen this message (and in + %% what capacity - i.e. was it published previously or discarded + %% previously) and thus the message should be dropped. 
+ {is_duplicate, 3}, + + %% Called to inform the BQ about messages which have reached the + %% queue, but are not going to be further passed to BQ for some + %% reason. Note that this is may be invoked for messages for + %% which BQ:is_duplicate/2 has already returned {'published' | + %% 'discarded', BQS}. + {discard, 3} ]; behaviour_info(_Other) -> undefined. diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 294fae97..2ef07071 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2072,9 +2072,9 @@ test_queue_index() -> passed. -variable_queue_init(QName, IsDurable, Recover) -> - rabbit_variable_queue:init(QName, IsDurable, Recover, - fun nop/1, fun nop/1, fun nop/2, fun nop/1). +variable_queue_init(Q, Recover) -> + rabbit_variable_queue:init( + Q, Recover, fun nop/1, fun nop/1, fun nop/2, fun nop/1). variable_queue_publish(IsPersistent, Count, VQ) -> lists:foldl( @@ -2086,7 +2086,7 @@ variable_queue_publish(IsPersistent, Count, VQ) -> true -> 2; false -> 1 end}, <<>>), - #message_properties{}, VQN) + #message_properties{}, self(), VQN) end, VQ, lists:seq(1, Count)). variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> @@ -2104,9 +2104,13 @@ assert_prop(List, Prop, Value) -> assert_props(List, PropVals) -> [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals]. +test_amqqueue(Durable) -> + (rabbit_amqqueue:pseudo_queue(test_queue(), self())) + #amqqueue { durable = Durable }. + with_fresh_variable_queue(Fun) -> ok = empty_test_queue(), - VQ = variable_queue_init(test_queue(), true, false), + VQ = variable_queue_init(test_amqqueue(true), false), S0 = rabbit_variable_queue:status(VQ), assert_props(S0, [{q1, 0}, {q2, 0}, {delta, {delta, undefined, 0, undefined}}, @@ -2164,7 +2168,7 @@ test_dropwhile(VQ0) -> rabbit_basic:message( rabbit_misc:r(<<>>, exchange, <<>>), <<>>, #'P_basic'{}, <<>>), - #message_properties{expiry = N}, VQN) + #message_properties{expiry = N}, self(), VQN) end, VQ0, lists:seq(1, Count)), %% drop the first 5 messages @@ -2208,7 +2212,7 @@ test_variable_queue_dynamic_duration_change(VQ0) -> %% drain {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7), - VQ9 = rabbit_variable_queue:ack(AckTags, VQ8), + {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8), {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), VQ10. @@ -2218,7 +2222,7 @@ publish_fetch_and_ack(0, _Len, VQ0) -> publish_fetch_and_ack(N, Len, VQ0) -> VQ1 = variable_queue_publish(false, 1, VQ0), {{_Msg, false, AckTag, Len}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), - VQ3 = rabbit_variable_queue:ack([AckTag], VQ2), + {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2), publish_fetch_and_ack(N-1, Len, VQ3). test_variable_queue_partial_segments_delta_thing(VQ0) -> @@ -2252,7 +2256,7 @@ test_variable_queue_partial_segments_delta_thing(VQ0) -> {len, HalfSegment + 1}]), {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false, HalfSegment + 1, VQ7), - VQ9 = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8), + {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8), %% should be empty now {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9), VQ10. 
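
The test changes above reflect the new return shape of ack/2 and requeue/3: both now hand back the ids of the affected messages alongside the updated state. A small illustrative helper showing how a caller might thread that result through; the confirm fun is an assumption, not part of the patch.

    %% Illustrative only: ack some tags and pass the resulting message
    %% ids to an arbitrary confirm function supplied by the caller.
    ack_and_confirm(BQ, AckTags, BQS, ConfirmFun) ->
        {MsgIds, BQS1} = BQ:ack(AckTags, BQS),
        ok = ConfirmFun(MsgIds),
        BQS1.
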
@@ -2281,7 +2285,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = variable_queue_init(test_queue(), true, true), + VQ7 = variable_queue_init(test_amqqueue(true), true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), VQ9 = variable_queue_publish(false, 1, VQ8), @@ -2294,17 +2298,18 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> VQ1 = rabbit_variable_queue:set_ram_duration_target(0, VQ0), VQ2 = variable_queue_publish(false, 4, VQ1), {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), - VQ4 = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), + {_Guids, VQ4} = + rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:idle_timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), - VQ7 = variable_queue_init(test_queue(), true, true), + VQ7 = variable_queue_init(test_amqqueue(true), true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. test_queue_recover() -> Count = 2 * rabbit_queue_index:next_segment_boundary(0), TxID = rabbit_guid:guid(), - {new, #amqqueue { pid = QPid, name = QName }} = + {new, #amqqueue { pid = QPid, name = QName } = Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), [begin Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), @@ -2328,7 +2333,7 @@ test_queue_recover() -> {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} = rabbit_amqqueue:basic_get(Q1, self(), false), exit(QPid1, shutdown), - VQ1 = variable_queue_init(QName, true, true), + VQ1 = variable_queue_init(Q, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index ff7252fd..7a3c17a2 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,18 +16,19 @@ -module(rabbit_variable_queue). --export([init/5, terminate/1, delete_and_terminate/1, - purge/1, publish/3, publish_delivered/4, drain_confirmed/1, - fetch/2, ack/2, tx_publish/4, tx_ack/3, tx_rollback/2, tx_commit/4, +-export([init/4, terminate/1, delete_and_terminate/1, + purge/1, publish/4, publish_delivered/5, drain_confirmed/1, + fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, - status/1, multiple_routing_keys/0]). + status/1, invoke/3, is_duplicate/3, discard/3, + multiple_routing_keys/0]). -export([start/1, stop/0]). %% exported for testing only --export([start_msg_store/2, stop_msg_store/0, init/7]). +-export([start_msg_store/2, stop_msg_store/0, init/6]). %%---------------------------------------------------------------------------- %% Definitions: @@ -408,15 +409,15 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). -init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback) -> - init(QueueName, IsDurable, Recover, AsyncCallback, SyncCallback, +init(Queue, Recover, AsyncCallback, SyncCallback) -> + init(Queue, Recover, AsyncCallback, SyncCallback, fun (MsgIds, ActionTaken) -> msgs_written_to_disk(AsyncCallback, MsgIds, ActionTaken) end, fun (MsgIds) -> msg_indices_written_to_disk(AsyncCallback, MsgIds) end). 
-init(QueueName, IsDurable, false, AsyncCallback, SyncCallback, - MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(#amqqueue { name = QueueName, durable = IsDurable }, false, + AsyncCallback, SyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), init(IsDurable, IndexState, 0, [], AsyncCallback, SyncCallback, case IsDurable of @@ -426,8 +427,8 @@ init(QueueName, IsDurable, false, AsyncCallback, SyncCallback, end, msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback)); -init(QueueName, true, true, AsyncCallback, SyncCallback, - MsgOnDiskFun, MsgIdxOnDiskFun) -> +init(#amqqueue { name = QueueName, durable = true }, true, + AsyncCallback, SyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of @@ -517,13 +518,14 @@ purge(State = #vqstate { q4 = Q4, ram_index_count = 0, persistent_count = PCount1 })}. -publish(Msg, MsgProps, State) -> +publish(Msg, MsgProps, _ChPid, State) -> {_SeqId, State1} = publish(Msg, MsgProps, false, false, State), a(reduce_memory_use(State1)). publish_delivered(false, #basic_message { id = MsgId }, #message_properties { needs_confirming = NeedsConfirming }, - State = #vqstate { async_callback = Callback, len = 0 }) -> + _ChPid, State = #vqstate { async_callback = Callback, + len = 0 }) -> case NeedsConfirming of true -> blind_confirm(Callback, gb_sets:singleton(MsgId)); false -> ok @@ -533,13 +535,13 @@ publish_delivered(true, Msg = #basic_message { is_persistent = IsPersistent, id = MsgId }, MsgProps = #message_properties { needs_confirming = NeedsConfirming }, - State = #vqstate { len = 0, - next_seq_id = SeqId, - out_counter = OutCount, - in_counter = InCount, - persistent_count = PCount, - durable = IsDurable, - unconfirmed = UC }) -> + _ChPid, State = #vqstate { len = 0, + next_seq_id = SeqId, + out_counter = OutCount, + in_counter = InCount, + persistent_count = PCount, + durable = IsDurable, + unconfirmed = UC }) -> IsPersistent1 = IsDurable andalso IsPersistent, MsgStatus = (msg_status(IsPersistent1, SeqId, Msg, MsgProps)) #msg_status { is_delivered = true }, @@ -665,13 +667,14 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { persistent_count = PCount1 })}. ack(AckTags, State) -> - a(ack(fun msg_store_remove/3, - fun (_, State0) -> State0 end, - AckTags, State)). + {MsgIds, State1} = ack(fun msg_store_remove/3, + fun (_, State0) -> State0 end, + AckTags, State), + {MsgIds, a(State1)}. tx_publish(Txn, Msg = #basic_message { is_persistent = IsPersistent }, MsgProps, - State = #vqstate { durable = IsDurable, - msg_store_clients = MSCState }) -> + _ChPid, State = #vqstate { durable = IsDurable, + msg_store_clients = MSCState }) -> Tx = #tx { pending_messages = Pubs } = lookup_tx(Txn), store_tx(Txn, Tx #tx { pending_messages = [{Msg, MsgProps} | Pubs] }), case IsPersistent andalso IsDurable of @@ -727,7 +730,7 @@ requeue(AckTags, MsgPropsFun, State) -> (MsgPropsFun(MsgProps)) #message_properties { needs_confirming = false } end, - a(reduce_memory_use( + {MsgIds, State1} = ack(fun (_, _, _) -> ok end, fun (#msg_status { msg = Msg, msg_props = MsgProps }, State1) -> {_SeqId, State2} = publish(Msg, MsgPropsFun1(MsgProps), @@ -742,7 +745,8 @@ requeue(AckTags, MsgPropsFun, State) -> true, true, State2), State3 end, - AckTags, State))). + AckTags, State), + {MsgIds, a(reduce_memory_use(State1))}. len(#vqstate { len = Len }) -> Len. 
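
The rabbit_variable_queue changes above widen init and the publish family. A sketch of the new arities as the updated test helpers earlier in this patch call them: init/4 takes the #amqqueue{} record plus the two module-tagged callbacks, and publish/4 additionally takes the sender pid. The no-op callbacks below are an assumption standing in for the real queue-process callbacks, and the call assumes a running message store, as in the broker's own startup.

    %% Sketch of the new arities: init/4 then publish/4 with the sender
    %% pid threaded through (rabbit_variable_queue ignores the pid;
    %% wrappers such as the mirror-queue master make use of it).
    init_and_publish_sketch(Q, Msg, MsgProps) ->
        Nop2 = fun (_Mod, _Fun) -> ok end,
        VQ0  = rabbit_variable_queue:init(Q, false, Nop2, Nop2),
        rabbit_variable_queue:publish(Msg, MsgProps, self(), VQ0).
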
@@ -880,6 +884,13 @@ status(#vqstate { {avg_ack_ingress_rate, AvgAckIngressRate}, {avg_ack_egress_rate , AvgAckEgressRate} ]. +invoke(?MODULE, Fun, State) -> + Fun(?MODULE, State). + +is_duplicate(_Txn, _Msg, State) -> {false, State}. + +discard(_Msg, _ChPid, State) -> State. + %%---------------------------------------------------------------------------- %% Minor helpers %%---------------------------------------------------------------------------- @@ -954,8 +965,8 @@ msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) -> msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) -> CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE), - rabbit_msg_store:client_init( - MsgStore, Ref, MsgOnDiskFun, fun () -> Callback(CloseFDsFun) end). + rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun, + fun () -> Callback(?MODULE, CloseFDsFun) end). msg_store_write(MSCState, IsPersistent, MsgId, Msg) -> with_immutable_msg_store_state( @@ -983,7 +994,7 @@ msg_store_close_fds(MSCState, IsPersistent) -> fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end). msg_store_close_fds_fun(IsPersistent) -> - fun (State = #vqstate { msg_store_clients = MSCState }) -> + fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) -> {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent), State #vqstate { msg_store_clients = MSCState1 } end. @@ -1129,7 +1140,8 @@ blank_rate(Timestamp, IngressLength) -> msg_store_callback(PersistentMsgIds, Pubs, AckTags, Fun, MsgPropsFun, AsyncCallback, SyncCallback) -> - case SyncCallback(fun (StateN) -> + case SyncCallback(?MODULE, + fun (?MODULE, StateN) -> tx_commit_post_msg_store(true, Pubs, AckTags, Fun, MsgPropsFun, StateN) end) of @@ -1192,20 +1204,21 @@ tx_commit_index(State = #vqstate { on_sync = #sync { Acks = lists:append(SAcks), Pubs = [{Msg, Fun(MsgProps)} || {Fun, PubsN} <- lists:reverse(SPubs), {Msg, MsgProps} <- lists:reverse(PubsN)], - {SeqIds, State1 = #vqstate { index_state = IndexState }} = + {_MsgIds, State1} = ack(Acks, State), + {SeqIds, State2 = #vqstate { index_state = IndexState }} = lists:foldl( fun ({Msg = #basic_message { is_persistent = IsPersistent }, MsgProps}, - {SeqIdsAcc, State2}) -> + {SeqIdsAcc, State3}) -> IsPersistent1 = IsDurable andalso IsPersistent, - {SeqId, State3} = - publish(Msg, MsgProps, false, IsPersistent1, State2), - {cons_if(IsPersistent1, SeqId, SeqIdsAcc), State3} - end, {PAcks, ack(Acks, State)}, Pubs), + {SeqId, State4} = + publish(Msg, MsgProps, false, IsPersistent1, State3), + {cons_if(IsPersistent1, SeqId, SeqIdsAcc), State4} + end, {PAcks, State1}, Pubs), IndexState1 = rabbit_queue_index:sync(SeqIds, IndexState), [ Fun() || Fun <- lists:reverse(SFuns) ], reduce_memory_use( - State1 #vqstate { index_state = IndexState1, on_sync = ?BLANK_SYNC }). + State2 #vqstate { index_state = IndexState1, on_sync = ?BLANK_SYNC }). purge_betas_and_deltas(LensByStore, State = #vqstate { q3 = Q3, @@ -1352,7 +1365,7 @@ remove_pending_ack(KeepPersistent, State = #vqstate { pending_ack = PA, index_state = IndexState, msg_store_clients = MSCState }) -> - {PersistentSeqIds, MsgIdsByStore} = + {PersistentSeqIds, MsgIdsByStore, _AllMsgIds} = dict:fold(fun accumulate_ack/3, accumulate_ack_init(), PA), State1 = State #vqstate { pending_ack = dict:new(), ram_ack_index = gb_trees:empty() }, @@ -1371,9 +1384,9 @@ remove_pending_ack(KeepPersistent, end. 
ack(_MsgStoreFun, _Fun, [], State) -> - State; + {[], State}; ack(MsgStoreFun, Fun, AckTags, State) -> - {{PersistentSeqIds, MsgIdsByStore}, + {{PersistentSeqIds, MsgIdsByStore, AllMsgIds}, State1 = #vqstate { index_state = IndexState, msg_store_clients = MSCState, persistent_count = PCount, @@ -1393,21 +1406,24 @@ ack(MsgStoreFun, Fun, AckTags, State) -> || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)], PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len( orddict:new(), MsgIdsByStore)), - State1 #vqstate { index_state = IndexState1, - persistent_count = PCount1, - ack_out_counter = AckOutCount + length(AckTags) }. + {lists:reverse(AllMsgIds), + State1 #vqstate { index_state = IndexState1, + persistent_count = PCount1, + ack_out_counter = AckOutCount + length(AckTags) }}. -accumulate_ack_init() -> {[], orddict:new()}. +accumulate_ack_init() -> {[], orddict:new(), []}. accumulate_ack(_SeqId, #msg_status { is_persistent = false, %% ASSERTIONS msg_on_disk = false, - index_on_disk = false }, - {PersistentSeqIdsAcc, MsgIdsByStore}) -> - {PersistentSeqIdsAcc, MsgIdsByStore}; + index_on_disk = false, + msg_id = MsgId }, + {PersistentSeqIdsAcc, MsgIdsByStore, AllMsgIds}) -> + {PersistentSeqIdsAcc, MsgIdsByStore, [MsgId | AllMsgIds]}; accumulate_ack(SeqId, {IsPersistent, MsgId, _MsgProps}, - {PersistentSeqIdsAcc, MsgIdsByStore}) -> + {PersistentSeqIdsAcc, MsgIdsByStore, AllMsgIds}) -> {cons_if(IsPersistent, SeqId, PersistentSeqIdsAcc), - rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore)}. + rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore), + [MsgId | AllMsgIds]}. find_persistent_count(LensByStore) -> case orddict:find(true, LensByStore) of @@ -1451,14 +1467,16 @@ needs_index_sync(#vqstate { msg_indices_on_disk = MIOD, not (gb_sets:is_empty(UC) orelse gb_sets:is_subset(UC, MIOD)). blind_confirm(Callback, MsgIdSet) -> - Callback(fun (State) -> record_confirms(MsgIdSet, State) end). + Callback(?MODULE, + fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end). msgs_written_to_disk(Callback, MsgIdSet, removed) -> blind_confirm(Callback, MsgIdSet); msgs_written_to_disk(Callback, MsgIdSet, written) -> - Callback(fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> + Callback(?MODULE, + fun (?MODULE, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> Confirmed = gb_sets:intersection(UC, MsgIdSet), record_confirms(gb_sets:intersection(MsgIdSet, MIOD), State #vqstate { @@ -1467,9 +1485,10 @@ msgs_written_to_disk(Callback, MsgIdSet, written) -> end). msg_indices_written_to_disk(Callback, MsgIdSet) -> - Callback(fun (State = #vqstate { msgs_on_disk = MOD, - msg_indices_on_disk = MIOD, - unconfirmed = UC }) -> + Callback(?MODULE, + fun (?MODULE, State = #vqstate { msgs_on_disk = MOD, + msg_indices_on_disk = MIOD, + unconfirmed = UC }) -> Confirmed = gb_sets:intersection(UC, MsgIdSet), record_confirms(gb_sets:intersection(MsgIdSet, MOD), State #vqstate { -- cgit v1.2.1 From 77784cf6bd022e7a83bd8637d459b33949dca618 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:10:17 +0100 Subject: Go back to add_binding. 
--- include/rabbit_exchange_type_spec.hrl | 4 ++-- src/rabbit_binding.erl | 4 ++-- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_direct.erl | 4 ++-- src/rabbit_exchange_type_fanout.erl | 4 ++-- src/rabbit_exchange_type_headers.erl | 4 ++-- src/rabbit_exchange_type_topic.erl | 10 ++++------ src/rabbit_tests.erl | 2 +- 8 files changed, 16 insertions(+), 18 deletions(-) diff --git a/include/rabbit_exchange_type_spec.hrl b/include/rabbit_exchange_type_spec.hrl index fd3ddf7e..c80cc196 100644 --- a/include/rabbit_exchange_type_spec.hrl +++ b/include/rabbit_exchange_type_spec.hrl @@ -23,8 +23,8 @@ -spec(create/2 :: (boolean(), rabbit_types:exchange()) -> 'ok'). -spec(delete/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). --spec(add_bindings/3 :: (boolean(), rabbit_types:exchange(), - [rabbit_types:binding()]) -> 'ok'). +-spec(add_binding/3 :: (boolean(), rabbit_types:exchange(), + rabbit_types:binding()) -> 'ok'). -spec(remove_bindings/3 :: (boolean(), rabbit_types:exchange(), [rabbit_types:binding()]) -> 'ok'). -spec(assert_args_equivalence/2 :: diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 611f7909..0fb0baf3 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -114,7 +114,7 @@ recover(XNames, QNames) -> true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok end, - rabbit_exchange:callback(X, add_bindings, [Tx, X, [B]]) + rabbit_exchange:callback(X, add_binding, [Tx, X, B]) end, rabbit_durable_route), ok. @@ -150,7 +150,7 @@ add(Src, Dst, B) -> true -> ok = sync_binding(B, Durable, fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback( - Src, add_bindings, [Tx, Src, [B]]), + Src, add_binding, [Tx, Src, B]), rabbit_event:notify_if( not Tx, binding_created, info(B)) end; diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index 0fede0be..b2400098 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -33,7 +33,7 @@ behaviour_info(callbacks) -> {delete, 3}, %% called after a binding has been added or bindings have been recovered - {add_bindings, 3}, + {add_binding, 3}, %% called after bindings have been deleted. {remove_bindings, 3}, diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl index 4c56a1f8..40078b1a 100644 --- a/src/rabbit_exchange_type_direct.erl +++ b/src/rabbit_exchange_type_direct.erl @@ -21,7 +21,7 @@ -export([description/0, route/2]). -export([validate/1, create/2, delete/3, - add_bindings/3, remove_bindings/3, assert_args_equivalence/2]). + add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). -rabbit_boot_step({?MODULE, @@ -42,7 +42,7 @@ route(#exchange{name = Name}, validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl index 62568949..f32ef917 100644 --- a/src/rabbit_exchange_type_fanout.erl +++ b/src/rabbit_exchange_type_fanout.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). 
@@ -41,7 +41,7 @@ route(#exchange{name = Name}, _Delivery) -> validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl index 258e785a..139feb04 100644 --- a/src/rabbit_exchange_type_headers.erl +++ b/src/rabbit_exchange_type_headers.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -115,7 +115,7 @@ headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest], validate(_X) -> ok. create(_Tx, _X) -> ok. delete(_Tx, _X, _Bs) -> ok. -add_bindings(_Tx, _X, _Bs) -> ok. +add_binding(_Tx, _X, _B) -> ok. remove_bindings(_Tx, _X, _Bs) -> ok. assert_args_equivalence(X, Args) -> rabbit_exchange:assert_args_equivalence(X, Args). diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index 2c995df8..cdc95226 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -21,7 +21,7 @@ -behaviour(rabbit_exchange_type). -export([description/0, route/2]). --export([validate/1, create/2, delete/3, add_bindings/3, +-export([validate/1, create/2, delete/3, add_binding/3, remove_bindings/3, assert_args_equivalence/2]). -include("rabbit_exchange_type_spec.hrl"). @@ -56,12 +56,10 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_bindings(true, _X, Bs) -> +add_binding(true, _X, B) -> rabbit_misc:execute_mnesia_transaction( - fun () -> - lists:foreach(fun (B) -> internal_add_binding(B) end, Bs) - end); -add_bindings(false, _X, _Bs) -> + fun () -> internal_add_binding(B) end); +add_binding(false, _X, _B) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index e618156b..c029412d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -629,7 +629,7 @@ test_topic_matching() -> {"#.#.#", "t24"}, {"*", "t25"}, {"#.b.#", "t26"}]], - lists:foreach(fun (B) -> exchange_op_callback(X, add_bindings, [[B]]) end, + lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end, Bindings), %% test some matches -- cgit v1.2.1 From 13b5acf1f9bab327405c73939cf7cb11df97530e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:13:34 +0100 Subject: Improve comment, minimise difference from default. --- src/rabbit_exchange_type.erl | 2 +- src/rabbit_exchange_type_topic.erl | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl index b2400098..cd96407c 100644 --- a/src/rabbit_exchange_type.erl +++ b/src/rabbit_exchange_type.erl @@ -32,7 +32,7 @@ behaviour_info(callbacks) -> %% called after exchange (auto)deletion. {delete, 3}, - %% called after a binding has been added or bindings have been recovered + %% called after a binding has been added or recovered {add_binding, 3}, %% called after bindings have been deleted. 
diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl index cdc95226..5cec5b41 100644 --- a/src/rabbit_exchange_type_topic.erl +++ b/src/rabbit_exchange_type_topic.erl @@ -56,10 +56,9 @@ delete(true, #exchange{name = X}, _Bs) -> delete(false, _Exchange, _Bs) -> ok. -add_binding(true, _X, B) -> - rabbit_misc:execute_mnesia_transaction( - fun () -> internal_add_binding(B) end); -add_binding(false, _X, _B) -> +add_binding(true, _Exchange, Binding) -> + internal_add_binding(Binding); +add_binding(false, _Exchange, _Binding) -> ok. remove_bindings(true, #exchange{name = X}, Bs) -> -- cgit v1.2.1 From b3eb94ab9da72a897860109e80591b10e3fa4f08 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 12:49:52 +0100 Subject: Cut down on reads. --- src/rabbit_binding.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 0fb0baf3..c9106711 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -102,11 +102,11 @@ recover(XNames, QNames) -> #resource{kind = Kind}}}) -> %% The check against rabbit_durable_route is in case it %% disappeared between getting the list and here - mnesia:read({rabbit_durable_route, B}) =/= [] andalso - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) + sets:is_element(Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) andalso + mnesia:read({rabbit_durable_route, B}) =/= [] end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> {ok, X} = rabbit_exchange:lookup(Src), -- cgit v1.2.1 From a10d90887efdd9eb0f8a588a8a8c94b15d1eb0aa Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 12:55:42 +0100 Subject: cosmetic --- src/rabbit_binding.erl | 2 +- src/rabbit_exchange.erl | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index c9106711..ca7be59a 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -109,11 +109,11 @@ recover(XNames, QNames) -> mnesia:read({rabbit_durable_route, B}) =/= [] end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> - {ok, X} = rabbit_exchange:lookup(Src), case Tx of true -> ok = sync_transient_binding(R, fun mnesia:write/3); false -> ok end, + {ok, X} = rabbit_exchange:lookup(Src), rabbit_exchange:callback(X, add_binding, [Tx, X, B]) end, rabbit_durable_route), diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index a74f9d28..42111773 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -87,12 +87,12 @@ recover() -> fun (#exchange{name = XName}) -> mnesia:read({rabbit_exchange, XName}) =:= [] end, - fun (X, Tx) -> case Tx of - true -> ok = mnesia:write(rabbit_exchange, - X, write); - false -> ok - end, - rabbit_exchange:callback(X, create, [Tx, X]) + fun (X, Tx) -> + case Tx of + true -> ok = mnesia:write(rabbit_exchange, X, write); + false -> ok + end, + rabbit_exchange:callback(X, create, [Tx, X]) end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. -- cgit v1.2.1 From 669d7135ff55fefec095ccfe0235dd8dc4db6697 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 13:56:09 +0100 Subject: Keys not key. 
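Anything that decodes trace messages should now read the plural key: a message can carry its primary routing key plus any CC keys, so the value is an AMQP array of longstrs rather than a single string. Purely as an illustration, with invented values, an entry in the decoded table looks like:

    {<<"routing_keys">>, array, [{longstr, <<"order.created">>},
                                 {longstr, <<"audit">>}]}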
--- src/rabbit_trace.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl
index 8f531808..4c2ae858 100644
--- a/src/rabbit_trace.erl
+++ b/src/rabbit_trace.erl
@@ -116,7 +116,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName},
                 {<<"user_id">>, longstr, UserId},
                 {<<"app_id">>, longstr, AppId}]),
     {[{<<"exchange_name">>, longstr, XName},
-      {<<"routing_key">>, array, [{longstr, K} || K <- RoutingKeys]},
+      {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
       {<<"headers">>, table, Headers1},
       {<<"node">>, longstr, list_to_binary(atom_to_list(node()))}],
      list_to_binary(lists:reverse(PFR))}.
-- cgit v1.2.1

From ce51765ac7299ea27796d57c3903a15e4f4120ca Mon Sep 17 00:00:00 2001
From: Matthew Sackman
Date: Fri, 8 Apr 2011 14:12:26 +0100
Subject: Abstract out mainly timer maintenance functions

---
 src/rabbit_amqqueue_process.erl       | 54 +++++++------------
 src/rabbit_amqqueue_process_utils.erl | 99 +++++++++++++++++++++++++++++++++++
 2 files changed, 118 insertions(+), 35 deletions(-)
 create mode 100644 src/rabbit_amqqueue_process_utils.erl

diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index 2b0fe17e..435edc07 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -21,8 +21,6 @@
 -behaviour(gen_server2).
 
 -define(UNSENT_MESSAGE_LIMIT,          100).
--define(SYNC_INTERVAL,                 25). %% milliseconds
--define(RAM_DURATION_UPDATE_INTERVAL,  5000).
 -define(BASE_MESSAGE_PROPERTIES,
         #message_properties{expiry = undefined, needs_confirming = false}).
 
@@ -226,37 +224,27 @@ next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
         false -> {stop_sync_timer(State1),   hibernate}
     end.
 
-ensure_sync_timer(State = #q{sync_timer_ref = undefined}) ->
-    {ok, TRef} = timer:apply_after(
-                   ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]),
-    State#q{sync_timer_ref = TRef};
 ensure_sync_timer(State) ->
-    State.
+    rabbit_amqqueue_process_utils:ensure_sync_timer(
+      fun sync_timer_getter/1, fun sync_timer_setter/2, State).
+
+stop_sync_timer(State) ->
+    rabbit_amqqueue_process_utils:stop_sync_timer(
+      fun sync_timer_getter/1, fun sync_timer_setter/2, State).
+
+sync_timer_getter(State) -> State#q.sync_timer_ref.
+sync_timer_setter(Timer, State) -> State#q{sync_timer_ref = Timer}.
 
-stop_sync_timer(State = #q{sync_timer_ref = undefined}) ->
-    State;
-stop_sync_timer(State = #q{sync_timer_ref = TRef}) ->
-    {ok, cancel} = timer:cancel(TRef),
-    State#q{sync_timer_ref = undefined}.
-
-ensure_rate_timer(State = #q{rate_timer_ref = undefined}) ->
-    {ok, TRef} = timer:apply_after(
-                   ?RAM_DURATION_UPDATE_INTERVAL,
-                   rabbit_amqqueue, update_ram_duration,
-                   [self()]),
-    State#q{rate_timer_ref = TRef};
-ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) ->
-    State#q{rate_timer_ref = undefined};
 ensure_rate_timer(State) ->
-    State.
+    rabbit_amqqueue_process_utils:ensure_rate_timer(
+      fun rate_timer_getter/1, fun rate_timer_setter/2, State).
 
-stop_rate_timer(State = #q{rate_timer_ref = undefined}) ->
-    State;
-stop_rate_timer(State = #q{rate_timer_ref = just_measured}) ->
-    State#q{rate_timer_ref = undefined};
-stop_rate_timer(State = #q{rate_timer_ref = TRef}) ->
-    {ok, cancel} = timer:cancel(TRef),
-    State#q{rate_timer_ref = undefined}.
+stop_rate_timer(State) ->
+    rabbit_amqqueue_process_utils:stop_rate_timer(
+      fun rate_timer_getter/1, fun rate_timer_setter/2, State).
+
+rate_timer_getter(State) -> State#q.rate_timer_ref. 
+rate_timer_setter(Timer, State) -> State#q{rate_timer_ref = Timer}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; @@ -1160,15 +1148,11 @@ handle_pre_hibernate(State = #q{backing_queue_state = undefined}) -> handle_pre_hibernate(State = #q{backing_queue = BQ, backing_queue_state = BQS, stats_timer = StatsTimer}) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - BQS3 = BQ:handle_pre_hibernate(BQS2), + BQS1 = rabbit_amqqueue_process_utils:backing_queue_pre_hibernate(BQ, BQS), rabbit_event:if_enabled(StatsTimer, fun () -> emit_stats(State, [{idle_since, now()}]) end), State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), - backing_queue_state = BQS3}, + backing_queue_state = BQS1}, {hibernate, stop_rate_timer(State1)}. diff --git a/src/rabbit_amqqueue_process_utils.erl b/src/rabbit_amqqueue_process_utils.erl new file mode 100644 index 00000000..feb2a79c --- /dev/null +++ b/src/rabbit_amqqueue_process_utils.erl @@ -0,0 +1,99 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. You may obtain a copy of the License at +%% http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +%% License for the specific language governing rights and limitations +%% under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 201-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_amqqueue_process_utils). + +-define(SYNC_INTERVAL, 25). %% milliseconds +-define(RAM_DURATION_UPDATE_INTERVAL, 5000). + +-export([backing_queue_pre_hibernate/2, + ensure_sync_timer/3, stop_sync_timer/3, + ensure_rate_timer/3, stop_rate_timer/3]). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-type(bq_mod() :: atom()). +-type(bq_state() :: any()). %% A good example of dialyzer's shortcomings + +-type(queue_state() :: any()). %% Another such example. +-type(getter(A) :: fun ((queue_state()) -> A)). +-type(setter(A) :: fun ((A, queue_state()) -> queue_state())). + +-type(tref() :: term()). %% Sigh. According to timer docs. + +-spec(backing_queue_pre_hibernate/2 :: (bq_mod(), bq_state()) -> bq_state()). + +-spec(ensure_sync_timer/3 :: (getter('undefined'|tref()), + setter('undefined'|tref()), + queue_state()) -> queue_state()). +-spec(stop_sync_timer/3 :: (getter('undefined'|tref()), + setter('undefined'|tref()), + queue_state()) -> queue_state()). + +-spec(ensure_rate_timer/3 :: (getter('undefined'|'just_measured'|tref()), + setter('undefined'|'just_measured'|tref()), + queue_state()) -> queue_state()). +-spec(stop_rate_timer/3 :: (getter('undefined'|'just_measured'|tref()), + setter('undefined'|'just_measured'|tref()), + queue_state()) -> queue_state()). + +-endif. + +%%---------------------------------------------------------------------------- + +backing_queue_pre_hibernate(BQ, BQS) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + BQ:handle_pre_hibernate(BQS2). 
+ +ensure_sync_timer(Getter, Setter, State) -> + case Getter(State) of + undefined -> {ok, TRef} = timer:apply_after( + ?SYNC_INTERVAL, rabbit_amqqueue, + sync_timeout, [self()]), + Setter(TRef, State); + _TRef -> State + end. + +stop_sync_timer(Getter, Setter, State) -> + case Getter(State) of + undefined -> State; + TRef -> {ok, cancel} = timer:cancel(TRef), + Setter(undefined, State) + end. + +ensure_rate_timer(Getter, Setter, State) -> + case Getter(State) of + undefined -> {ok, TRef} = + timer:apply_after( + ?RAM_DURATION_UPDATE_INTERVAL, rabbit_amqqueue, + update_ram_duration, [self()]), + Setter(TRef, State); + just_measured -> Setter(undefined, State); + _TRef -> State + end. + +stop_rate_timer(Getter, Setter, State) -> + case Getter(State) of + undefined -> State; + just_measured -> Setter(undefined, State); + TRef -> {ok, cancel} = timer:cancel(TRef), + Setter(undefined, State) + end. -- cgit v1.2.1 From 3cf667e6bf9d44e6195a331026f28ae3cdf3cb79 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 15:10:40 +0100 Subject: Specs. --- src/rabbit_trace.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 4c2ae858..2ef28be8 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -25,7 +25,11 @@ -ifdef(use_specs). -%% TODO +-type(delivery_tag() :: pos_integer()). + +-spec(tap_trace_in/1 :: (rabbit_types:basic_message()) -> 'ok'). +-spec(tap_trace_out/3 :: (rabbit_amqqueue:qmsg(), delivery_tag(), + rabbit_types:maybe(rabbit_types:ctag())) -> 'ok'). -endif. -- cgit v1.2.1 From 0cabb3204f3683079a24d0f855817fd9b8a29c7f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 17:03:58 +0100 Subject: Oops. --- src/rabbit_binding.erl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 84584a1c..583d4fa4 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -116,12 +116,14 @@ recover(XNames, QNames) -> mnesia:read({rabbit_semi_durable_route, B}) =/= [] end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> - case Tx of - true -> ok = sync_transient_binding(R, fun mnesia:write/3); - false -> ok - end, {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_binding, [Tx, X, B]) + Serial = case Tx of + true -> ok = sync_transient_binding( + R, fun mnesia:write/3), + transaction; + false -> rabbit_exchange:serial(X) + end, + rabbit_exchange:callback(X, add_binding, [Serial, X, B]) end, rabbit_semi_durable_route), ok. -- cgit v1.2.1 From ab9625d32692ff221a449d3952a911e840ffa944 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 8 Apr 2011 18:10:19 +0100 Subject: Don't cons inside the tx, prevents us from copying the accumulator on the way into the worker pool at great cost. --- src/rabbit_misc.erl | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 85e08615..814a5bbc 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -467,17 +467,20 @@ map_in_order(F, L) -> %% We ignore entries that have been modified or removed. 
table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> false; - _ -> Pred(E) - end - end, - fun (false, _Tx) -> Acc; - (true, Tx) -> PrePostCommitFun(E, Tx), - [E | Acc] - end) + fun (E, Acc) -> case execute_mnesia_transaction( + fun () -> case mnesia:match_object(TableName, E, + read) of + [] -> false; + _ -> Pred(E) + end + end, + fun (false, _Tx) -> false; + (true, Tx) -> PrePostCommitFun(E, Tx), + true + end) of + false -> Acc; + true -> [E | Acc] + end end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From c3decea2e53d15aec7db221110236ecacc8cd867 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 18:13:35 +0100 Subject: Some initial HA design documentation --- src/rabbit_mirror_queue_coordinator.erl | 75 +++++++++++++++++++++++++++++++++ src/rabbit_mirror_queue_master.erl | 3 ++ src/rabbit_mirror_queue_slave.erl | 3 ++ 3 files changed, 81 insertions(+) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 5fd07e60..f780f6b5 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -35,6 +35,81 @@ -define(ONE_SECOND, 1000). +%%---------------------------------------------------------------------------- +%% +%% Mirror Queues +%% +%% A queue with mirrors consists of the following: +%% +%% #amqqueue{ pid, mirror_pids } +%% | | +%% +----------+ +-------+--------------+-----------...etc... +%% | | | +%% V V V +%% amqqueue_process---+ slave-----+ slave-----+ ...etc... +%% | BQ = master----+ | | BQ = vq | | BQ = vq | +%% | | BQ = vq | | +-+-------+ +-+-------+ +%% | +-+-------+ | | | +%% +-++-----|---------+ | | +%% || | | | +%% || coordinator-+ | | +%% || +-+---------+ | | +%% || | | | +%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc... +%% || +--+ +--+ +--+ +%% || +%% consumers +%% +%% The master is merely an implementation of BQ, and thus is invoked +%% through the normal BQ interface by the amqqueue_process. The slaves +%% meanwhile are processes in their own right (as is the +%% coordinator). The coordinator and all slaves belong to the same gm +%% group. Every member of a gm group receives messages sent to the gm +%% group. Because the master is the BQ of amqqueue_process, it doesn't +%% have sole control over its mailbox, and as a result, the master +%% itself cannot be passed messages directly, yet it needs to react to +%% gm events, such as the death of slaves. Thus the master creates the +%% coordinator, and it is the coordinator that is the gm callback +%% module and event handler for the master. +%% +%% Consumers are only attached to the master. Thus the master is +%% responsible for informing all slaves when messages are fetched from +%% the BQ, when they're acked, and when they're requeued. +%% +%% The basic goal is to ensure that all slaves performs actions on +%% their BQ in the same order as the master. Thus the master +%% intercepts all events going to its BQ, and suitably broadcasts +%% these events on the gm. The slaves thus receive two streams of +%% events: one stream is via the gm, and one stream is from channels +%% directly. Note that whilst the stream via gm is guaranteed to be +%% consistently seen by all slaves, the same is not true of the stream +%% via channels. 
For example, in the event of an unexpected death of a +%% channel during a publish, only some of the mirrors may receive that +%% publish. As a result of this problem, the messages broadcast over +%% the gm contain published content, and thus slaves can operate +%% successfully on messages that they only receive via the gm. The key +%% purpose of also sending messages directly from the channels to the +%% slaves is that without this, in the event of the death of the +%% master, messages can be lost until a suitable slave is promoted. +%% +%% However, there are other reasons as well. For example, if confirms +%% are in use, then there is no guarantee that every slave will see +%% the delivery with the same msg_seq_no. As a result, the slaves have +%% to wait until they've seen both the publish via gm, and the publish +%% via the channel before they have enough information to be able to +%% issue the confirm, if necessary. Either form of publish can arrive +%% first, and a slave can be upgraded to the master at any point +%% during this process. Confirms continue to be issued correctly, +%% however. +%% +%% Because the slave is a full process, it impersonates parts of the +%% amqqueue API. However, it does not need to implement all parts: for +%% example, no ack or consumer-related message can arrive directly at +%% a slave from a channel: it is only publishes that pass both +%% directly to the slaves and go via gm. +%% +%%---------------------------------------------------------------------------- + start_link(Queue, GM) -> gen_server2:start_link(?MODULE, [Queue, GM], []). diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 664c706d..e6a71370 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -42,6 +42,9 @@ ack_msg_id }). +%% For general documentation of HA design, see +%% rabbit_mirror_queue_coordinator +%% %% Some notes on transactions %% %% We don't support transactions on mirror queues. To do so is diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 70b5c43d..89b8971c 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -16,6 +16,9 @@ -module(rabbit_mirror_queue_slave). +%% For general documentation of HA design, see +%% rabbit_mirror_queue_coordinator +%% %% We join the GM group before we add ourselves to the amqqueue %% record. As a result: %% 1. 
We can receive msgs from GM that correspond to messages we will -- cgit v1.2.1 From 15ea60267f1132150ebf89f9d2299e8d2323f688 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 8 Apr 2011 18:15:38 +0100 Subject: Additional hilarious witticism --- src/rabbit_mirror_queue_coordinator.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index f780f6b5..84220a5b 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -50,7 +50,7 @@ %% | BQ = master----+ | | BQ = vq | | BQ = vq | %% | | BQ = vq | | +-+-------+ +-+-------+ %% | +-+-------+ | | | -%% +-++-----|---------+ | | +%% +-++-----|---------+ | | (some details elided) %% || | | | %% || coordinator-+ | | %% || +-+---------+ | | -- cgit v1.2.1 From c8d1a130a5bb48c30e65399ad416b37a27e53afd Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 18:33:44 +0100 Subject: shrink --- src/rabbit_misc.erl | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 814a5bbc..87181c24 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -467,20 +467,16 @@ map_in_order(F, L) -> %% We ignore entries that have been modified or removed. table_filter(Pred, PrePostCommitFun, TableName) -> lists:foldl( - fun (E, Acc) -> case execute_mnesia_transaction( - fun () -> case mnesia:match_object(TableName, E, - read) of - [] -> false; - _ -> Pred(E) - end - end, - fun (false, _Tx) -> false; - (true, Tx) -> PrePostCommitFun(E, Tx), - true - end) of - false -> Acc; - true -> [E | Acc] - end + fun (E, Acc) -> + case execute_mnesia_transaction( + fun () -> mnesia:match_object(TableName, E, read) =/= [] + andalso Pred(E) end, + fun (false, _Tx) -> false; + (true, Tx) -> PrePostCommitFun(E, Tx), true + end) of + false -> Acc; + true -> [E | Acc] + end end, [], dirty_read_all(TableName)). dirty_read_all(TableName) -> -- cgit v1.2.1 From 98a8472c6f52abb6dcd198ed07a395d337cf35fa Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 20:08:45 +0100 Subject: remove duplicate check rabbit_misc:table_filter already filters out elements which have disappeared. 
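The check is redundant because table_filter re-reads each candidate inside its own transaction and only then applies the predicate, so rows that vanished after the dirty scan never reach it; rabbit_binding:recover can therefore drop its own mnesia:read guard. A standalone sketch of that idiom, with invented module and argument names (not RabbitMQ code):

    -module(stale_filter_sketch).
    -export([filter/2]).

    %% Return the entries of Tab that still exist and satisfy Pred, taking a
    %% dirty snapshot first and re-checking each entry in its own transaction.
    filter(Tab, Pred) ->
        Wild = mnesia:table_info(Tab, wild_pattern),
        [E || E <- mnesia:dirty_match_object(Tab, Wild),
              mnesia:activity(
                transaction,
                fun () ->
                        %% the row may have been deleted since the dirty read
                        mnesia:match_object(Tab, E, read) =/= [] andalso Pred(E)
                end)].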
--- src/rabbit_binding.erl | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index ca7be59a..7d13ea29 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -98,15 +98,12 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), rabbit_misc:table_filter( - fun (#route{binding = B = #binding{destination = Dst = - #resource{kind = Kind}}}) -> - %% The check against rabbit_durable_route is in case it - %% disappeared between getting the list and here + fun (#route{binding = #binding{destination = Dst = + #resource{kind = Kind}}}) -> sets:is_element(Dst, case Kind of exchange -> XNameSet; queue -> QNameSet - end) andalso - mnesia:read({rabbit_durable_route, B}) =/= [] + end) end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> case Tx of -- cgit v1.2.1 From de9c5b5bd077da91ab3dd09b1654e4d0bd650452 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 20:09:17 +0100 Subject: cosmetic: update comment on table_filter --- src/rabbit_misc.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 87181c24..cec10ff6 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -460,9 +460,8 @@ map_in_order(F, L) -> lists:reverse( lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)). -%% Fold over each entry in a table, executing the pre-post-commit function in a -%% transaction. This is often far more efficient than wrapping a tx -%% around the lot. +%% Apply a pre-post-commit function to all entries in a table that +%% satisfy a predicate, and return those entries. %% %% We ignore entries that have been modified or removed. table_filter(Pred, PrePostCommitFun, TableName) -> -- cgit v1.2.1 From 9456939f2ad57435fa19975bc552762ed722d83b Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Apr 2011 22:08:38 +0100 Subject: cosmetic changes and minor tweaks to rabbit_binding:{add,remove} - align 'add' and 'remove' structurally, with the isomorphic final phases extracted into helper funs - call 'read' instead of 'match_object' to check for binding presence in 'remove' - cleaner and possibly slightly more efficient --- src/rabbit_binding.erl | 64 ++++++++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7d13ea29..0fb0639a 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -124,8 +124,6 @@ exists(Binding) -> add(Binding) -> add(Binding, fun (_Src, _Dst) -> ok end). -remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). - add(Binding, InnerFun) -> binding_action( Binding, @@ -134,56 +132,46 @@ add(Binding, InnerFun) -> %% in general, we want to fail on that in preference to %% anything else case InnerFun(Src, Dst) of - ok -> add(Src, Dst, B); + ok -> case mnesia:read({rabbit_route, B}) of + [] -> add(Src, Dst, B); + [_] -> fun rabbit_misc:const_ok/1 + end; {error, _} = Err -> rabbit_misc:const(Err) end end). 
add(Src, Dst, B) -> - case mnesia:read({rabbit_route, B}) of - [] -> Durable = all_durable([Src, Dst]), - case (not Durable orelse - mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> ok = sync_binding(B, Durable, fun mnesia:write/3), - fun (Tx) -> - ok = rabbit_exchange:callback( - Src, add_binding, [Tx, Src, B]), - rabbit_event:notify_if( - not Tx, binding_created, info(B)) - end; - false -> rabbit_misc:const(not_found) - end; - [_] -> fun rabbit_misc:const_ok/1 + Durable = all_durable([Src, Dst]), + case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of + true -> ok = sync_binding(B, Durable, fun mnesia:write/3), + fun (Tx) -> ok = rabbit_exchange:callback(Src, add_binding, + [Tx, Src, B]), + rabbit_event:notify_if(not Tx, binding_created, + info(B)) + end; + false -> rabbit_misc:const(not_found) end. +remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). + remove(Binding, InnerFun) -> binding_action( Binding, fun (Src, Dst, B) -> - Result = - case mnesia:match_object(rabbit_route, #route{binding = B}, - write) of - [] -> - {error, binding_not_found}; - [_] -> - case InnerFun(Src, Dst) of - ok -> - ok = sync_binding(B, all_durable([Src, Dst]), - fun mnesia:delete_object/3), - {ok, maybe_auto_delete(B#binding.source, - [B], new_deletions())}; - {error, _} = E -> - E - end - end, - case Result of - {error, _} = Err -> - rabbit_misc:const(Err); - {ok, Deletions} -> - fun (Tx) -> ok = process_deletions(Deletions, Tx) end + case mnesia:read(rabbit_route, B, write) of + [] -> rabbit_misc:const({error, binding_not_found}); + [_] -> case InnerFun(Src, Dst) of + ok -> remove(Src, Dst, B); + {error, _} = Err -> rabbit_misc:const(Err) + end end end). +remove(Src, Dst, B) -> + ok = sync_binding(B, all_durable([Src, Dst]), fun mnesia:delete_object/3), + Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()), + fun (Tx) -> ok = process_deletions(Deletions, Tx) end. + list(VHostPath) -> VHostResource = rabbit_misc:r(VHostPath, '_'), Route = #route{binding = #binding{source = VHostResource, -- cgit v1.2.1 From 4499806171ae66cbb08b02a4309e876ff5efc0d7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 9 Apr 2011 00:15:52 +0100 Subject: tiny refactor --- src/rabbit_binding.erl | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 0fb0639a..b0a59a0c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -281,17 +281,16 @@ sync_transient_binding(Binding, Fun) -> call_with_source_and_destination(SrcName, DstName, Fun) -> SrcTable = table_for_resource(SrcName), DstTable = table_for_resource(DstName), - ErrFun = fun (Err) -> rabbit_misc:const(Err) end, + ErrFun = fun (Err) -> rabbit_misc:const({error, Err}) end, rabbit_misc:execute_mnesia_tx_with_tail( fun () -> case {mnesia:read({SrcTable, SrcName}), mnesia:read({DstTable, DstName})} of {[Src], [Dst]} -> Fun(Src, Dst); - {[], [_] } -> ErrFun({error, source_not_found}); - {[_], [] } -> ErrFun({error, destination_not_found}); - {[], [] } -> ErrFun({error, - source_and_destination_not_found}) - end + {[], [_] } -> ErrFun(source_not_found); + {[_], [] } -> ErrFun(destination_not_found); + {[], [] } -> ErrFun(source_and_destination_not_found) + end end). 
table_for_resource(#resource{kind = exchange}) -> rabbit_exchange; -- cgit v1.2.1 From 17010d0bd4e1db3a2f82916291e793e46ee3f5bf Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 9 Apr 2011 00:38:18 +0100 Subject: correct error when attempting to stomp on an unavailable binding ...and fix the specs too plus some cosmetic shuffling --- src/rabbit_binding.erl | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b0a59a0c..c2c8dc1f 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -17,7 +17,7 @@ -module(rabbit_binding). -include("rabbit.hrl"). --export([recover/2, exists/1, add/1, remove/1, add/2, remove/2, list/1]). +-export([recover/2, exists/1, add/1, add/2, remove/1, remove/2, list/1]). -export([list_for_source/1, list_for_destination/1, list_for_source_and_destination/2]). -export([new_deletions/0, combine_deletions/2, add_deletion/3, @@ -38,25 +38,24 @@ -type(bind_errors() :: rabbit_types:error('source_not_found' | 'destination_not_found' | 'source_and_destination_not_found')). --type(bind_res() :: 'ok' | bind_errors()). +-type(bind_ok_or_error() :: 'ok' | bind_errors() | + rabbit_types:error('binding_not_found')). +-type(bind_res() :: bind_ok_or_error() | rabbit_misc:const(bind_ok_or_error())). -type(inner_fun() :: fun((rabbit_types:exchange(), rabbit_types:exchange() | rabbit_types:amqqueue()) -> rabbit_types:ok_or_error(rabbit_types:amqp_error()))). -type(bindings() :: [rabbit_types:binding()]). --type(add_res() :: bind_res() | rabbit_misc:const(bind_res())). --type(bind_or_error() :: bind_res() | rabbit_types:error('binding_not_found')). --type(remove_res() :: bind_or_error() | rabbit_misc:const(bind_or_error())). -opaque(deletions() :: dict()). -spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) -> 'ok'). -spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()). --spec(add/1 :: (rabbit_types:binding()) -> add_res()). --spec(remove/1 :: (rabbit_types:binding()) -> remove_res()). --spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> add_res()). --spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> remove_res()). +-spec(add/1 :: (rabbit_types:binding()) -> bind_res()). +-spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). +-spec(remove/1 :: (rabbit_types:binding()) -> bind_res()). +-spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()). -spec(list/1 :: (rabbit_types:vhost()) -> bindings()). -spec(list_for_source/1 :: (rabbit_types:binding_source()) -> bindings()). @@ -149,7 +148,7 @@ add(Src, Dst, B) -> rabbit_event:notify_if(not Tx, binding_created, info(B)) end; - false -> rabbit_misc:const(not_found) + false -> rabbit_misc:const({error, binding_not_found}) end. remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end). -- cgit v1.2.1 From 2e4ec2ed4337f79c5701fd9e5295c2b6fef50078 Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Sat, 9 Apr 2011 16:51:13 +0000 Subject: Detect available memory on OpenBSD. While there, rename freebsd_sysctl() to sysctl(), since sysctl is available on number of different platforms. 
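Both BSD branches boil down to shelling out to sysctl(8) and parsing the integer it prints; hw.usermem reports bytes on OpenBSD, while the FreeBSD branch multiplies page count by page size. A minimal standalone sketch of the same idea (helper name invented, and it assumes /sbin/sysctl prints a bare number):

    %% e.g. sysctl_int("hw.usermem") on OpenBSD,
    %%      sysctl_int("vm.stats.vm.v_page_count") on FreeBSD.
    sysctl_int(Key) ->
        list_to_integer(os:cmd("/sbin/sysctl -n " ++ Key) -- "\n").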
--- src/vm_memory_monitor.erl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl index dcc6aff5..fb2fa267 100644 --- a/src/vm_memory_monitor.erl +++ b/src/vm_memory_monitor.erl @@ -239,10 +239,13 @@ get_total_memory({unix,darwin}) -> PageSize * (Inactive + Active + Free + Wired); get_total_memory({unix,freebsd}) -> - PageSize = freebsd_sysctl("vm.stats.vm.v_page_size"), - PageCount = freebsd_sysctl("vm.stats.vm.v_page_count"), + PageSize = sysctl("vm.stats.vm.v_page_size"), + PageCount = sysctl("vm.stats.vm.v_page_count"), PageCount * PageSize; +get_total_memory({unix,openbsd}) -> + sysctl("hw.usermem"); + get_total_memory({win32,_OSname}) -> %% Due to the Erlang print format bug, on Windows boxes the memory %% size is broken. For example Windows 7 64 bit with 4Gigs of RAM @@ -342,7 +345,7 @@ parse_line_aix(Line) -> false -> list_to_integer(Value) end}. -freebsd_sysctl(Def) -> +sysctl(Def) -> list_to_integer(cmd("/sbin/sysctl -n " ++ Def) -- "\n"). %% file:read_file does not work on files in /proc as it seems to get -- cgit v1.2.1 From a49f0d0b733cdc2bc72716cb6c41083ce866aeb0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 10 Apr 2011 12:53:52 +0100 Subject: Work on documentation of ha --- src/rabbit_mirror_queue_coordinator.erl | 146 +++++++++++++++++++++++++++----- src/rabbit_mirror_queue_master.erl | 40 --------- 2 files changed, 124 insertions(+), 62 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 84220a5b..7e521e49 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -60,47 +60,49 @@ %% || %% consumers %% -%% The master is merely an implementation of BQ, and thus is invoked -%% through the normal BQ interface by the amqqueue_process. The slaves +%% The master is merely an implementation of bq, and thus is invoked +%% through the normal bq interface by the amqqueue_process. The slaves %% meanwhile are processes in their own right (as is the %% coordinator). The coordinator and all slaves belong to the same gm %% group. Every member of a gm group receives messages sent to the gm -%% group. Because the master is the BQ of amqqueue_process, it doesn't +%% group. Because the master is the bq of amqqueue_process, it doesn't %% have sole control over its mailbox, and as a result, the master -%% itself cannot be passed messages directly, yet it needs to react to -%% gm events, such as the death of slaves. Thus the master creates the -%% coordinator, and it is the coordinator that is the gm callback -%% module and event handler for the master. +%% itself cannot be passed messages directly (well, it could by via +%% the amqqueue:run_backing_queue_async callback but that would induce +%% additional unnecessary loading on the master queue process), yet it +%% needs to react to gm events, such as the death of slaves. Thus the +%% master creates the coordinator, and it is the coordinator that is +%% the gm callback module and event handler for the master. %% %% Consumers are only attached to the master. Thus the master is %% responsible for informing all slaves when messages are fetched from -%% the BQ, when they're acked, and when they're requeued. +%% the bq, when they're acked, and when they're requeued. %% %% The basic goal is to ensure that all slaves performs actions on -%% their BQ in the same order as the master. 
Thus the master -%% intercepts all events going to its BQ, and suitably broadcasts +%% their bqs in the same order as the master. Thus the master +%% intercepts all events going to its bq, and suitably broadcasts %% these events on the gm. The slaves thus receive two streams of %% events: one stream is via the gm, and one stream is from channels -%% directly. Note that whilst the stream via gm is guaranteed to be -%% consistently seen by all slaves, the same is not true of the stream -%% via channels. For example, in the event of an unexpected death of a +%% directly. Whilst the stream via gm is guaranteed to be consistently +%% seen by all slaves, the same is not true of the stream via +%% channels. For example, in the event of an unexpected death of a %% channel during a publish, only some of the mirrors may receive that %% publish. As a result of this problem, the messages broadcast over %% the gm contain published content, and thus slaves can operate %% successfully on messages that they only receive via the gm. The key %% purpose of also sending messages directly from the channels to the %% slaves is that without this, in the event of the death of the -%% master, messages can be lost until a suitable slave is promoted. +%% master, messages could be lost until a suitable slave is promoted. %% -%% However, there are other reasons as well. For example, if confirms -%% are in use, then there is no guarantee that every slave will see -%% the delivery with the same msg_seq_no. As a result, the slaves have -%% to wait until they've seen both the publish via gm, and the publish +%% However, that is not the only reason. For example, if confirms are +%% in use, then there is no guarantee that every slave will see the +%% delivery with the same msg_seq_no. As a result, the slaves have to +%% wait until they've seen both the publish via gm, and the publish %% via the channel before they have enough information to be able to -%% issue the confirm, if necessary. Either form of publish can arrive -%% first, and a slave can be upgraded to the master at any point -%% during this process. Confirms continue to be issued correctly, -%% however. +%% perform the publish to their own bq, and subsequently issue the +%% confirm, if necessary. Either form of publish can arrive first, and +%% a slave can be upgraded to the master at any point during this +%% process. Confirms continue to be issued correctly, however. %% %% Because the slave is a full process, it impersonates parts of the %% amqqueue API. However, it does not need to implement all parts: for @@ -108,6 +110,106 @@ %% a slave from a channel: it is only publishes that pass both %% directly to the slaves and go via gm. %% +%% Slaves can be added dynamically. When this occurs, there is no +%% attempt made to sync the current contents of the master with the +%% new slave, thus the slave will start empty, regardless of the state +%% of the master. Thus the slave needs to be able to detect and ignore +%% operations which are for messages it has not received: because of +%% the strict FIFO nature of queues in general, this is +%% straightforward - all new publishes that the new slave receives via +%% gm should be processed as normal, but fetches which are for +%% messages the slave has never seen should be ignored. Similarly, +%% acks for messages the slave never fetched should be +%% ignored. 
Eventually, as the master is consumed from, the messages +%% at the head of the queue which were there before the slave joined +%% will disappear, and the slave will become fully synced with the +%% state of the master. The detection of the sync-status of a slave is +%% done entirely based on length: if the slave and the master both +%% agree on the length of the queue after the fetch of the head of the +%% queue, then the queues must be in sync. The only other possibility +%% is that the slave's queue is shorter, and thus the fetch should be +%% ignored. +%% +%% Because acktags are issued by the bq independently, and because +%% there is no requirement for the master and all slaves to use the +%% same bq, all references to msgs going over gm is by msg_id. Thus +%% upon acking, the master must convert the acktags back to msg_ids +%% (which happens to be what bq:ack returns), then sends the msg_ids +%% over gm, the slaves must convert the msg_ids to acktags (a mapping +%% the slaves themselves must maintain). +%% +%% When the master dies, a slave gets promoted. This will be the +%% eldest slave, and thus the hope is that that slave is most likely +%% to be sync'd with the master. The design of gm is that the +%% notification of the death of the master will only appear once all +%% messages in-flight from the master have been fully delivered to all +%% members of the gm group. Thus at this point, the slave that gets +%% promoted cannot broadcast different events in a different order +%% than the master for the same msgs: there is no possibility for the +%% same msg to be processed by the old master and the new master - if +%% it was processed by the old master then it will have been processed +%% by the slave before the slave was promoted, and vice versa. +%% +%% Upon promotion, all msgs pending acks are requeued as normal, the +%% slave constructs state suitable for use in the master module, and +%% then dynamically changes into an amqqueue_process with the master +%% as the bq, and the slave's bq as the master's bq. Thus the very +%% same process that was the slave is now a full amqqueue_process. +%% +%% In the event of channel failure, there is the possibility that a +%% msg that was being published only makes it to some of the +%% mirrors. If it makes it to the master, then the master will push +%% the entire message onto gm, and all the slaves will publish it to +%% their bq, even though they may not receive it directly from the +%% channel. This currently will create a small memory leak in the +%% slave's msg_id_status mapping as the slaves will expect that +%% eventually they'll receive the msg from the channel. If the message +%% does not make it to the master then the slaves that receive it will +%% hold onto the message, assuming it'll eventually appear via +%% gm. Again, this will currently result in a memory leak, though this +%% time, it's the entire message rather than tracking the status of +%% the message, which is potentially much worse. This may eventually +%% be solved by monitoring publishing channels in some way. +%% +%% We don't support transactions on mirror queues. To do so is +%% challenging. The underlying bq is free to add the contents of the +%% txn to the queue proper at any point after the tx.commit comes in +%% but before the tx.commit-ok goes out. 
This means that it is not +%% safe for all mirrors to simply issue the bq:tx_commit at the same +%% time, as the addition of the txn's contents to the queue may +%% subsequently be inconsistently interwoven with other actions on the +%% bq. The solution to this is, in the master, wrap the PostCommitFun +%% and do the gm:broadcast in there: at that point, you're in the bq +%% (well, there's actually nothing to stop that function being invoked +%% by some other process, but let's pretend for now: you could always +%% use run_backing_queue to ensure you really are in the queue process +%% (the _async variant would be unsafe from an ordering pov)), the +%% gm:broadcast is safe because you don't have to worry about races +%% with other gm:broadcast calls (same process). Thus this signal +%% would indicate sufficiently to all the slaves that they must insert +%% the complete contents of the txn at precisely this point in the +%% stream of events. +%% +%% However, it's quite difficult for the slaves to make that happen: +%% they would be forced to issue the bq:tx_commit at that point, but +%% then stall processing any further instructions from gm until they +%% receive the notification from their bq that the tx_commit has fully +%% completed (i.e. they need to treat what is an async system as being +%% fully synchronous). This is not too bad (apart from the +%% vomit-inducing notion of it all): just need a queue of instructions +%% from the GM; but then it gets rather worse when you consider what +%% needs to happen if the master dies at this point and the slave in +%% the middle of this tx_commit needs to be promoted. +%% +%% Finally, we can't possibly hope to make transactions atomic across +%% mirror queues, and it's not even clear that that's desirable: if a +%% slave fails whilst there's an open transaction in progress then +%% when the channel comes to commit the txn, it will detect the +%% failure and destroy the channel. However, the txn will have +%% actually committed successfully in all the other mirrors (including +%% master). To do this bit properly would require 2PC and all the +%% baggage that goes with that. +%% %%---------------------------------------------------------------------------- start_link(Queue, GM) -> diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index e6a71370..481ee7c4 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -44,46 +44,6 @@ %% For general documentation of HA design, see %% rabbit_mirror_queue_coordinator -%% -%% Some notes on transactions -%% -%% We don't support transactions on mirror queues. To do so is -%% challenging. The underlying bq is free to add the contents of the -%% txn to the queue proper at any point after the tx.commit comes in -%% but before the tx.commit-ok goes out. This means that it is not -%% safe for all mirrors to simply issue the BQ:tx_commit at the same -%% time, as the addition of the txn's contents to the queue may -%% subsequently be inconsistently interwoven with other actions on the -%% BQ. 
The solution to this is, in the master, wrap the PostCommitFun -%% and do the gm:broadcast in there: at that point, you're in the BQ -%% (well, there's actually nothing to stop that function being invoked -%% by some other process, but let's pretend for now: you could always -%% use run_backing_queue_async to ensure you really are in the queue -%% process), the gm:broadcast is safe because you don't have to worry -%% about races with other gm:broadcast calls (same process). Thus this -%% signal would indicate sufficiently to all the slaves that they must -%% insert the complete contents of the txn at precisely this point in -%% the stream of events. -%% -%% However, it's quite difficult for the slaves to make that happen: -%% they would be forced to issue the tx_commit at that point, but then -%% stall processing any further instructions from gm until they -%% receive the notification from their bq that the tx_commit has fully -%% completed (i.e. they need to treat what is an async system as being -%% fully synchronous). This is not too bad (apart from the -%% vomit-inducing notion of it all): just need a queue of instructions -%% from the GM; but then it gets rather worse when you consider what -%% needs to happen if the master dies at this point and the slave in -%% the middle of this tx_commit needs to be promoted. -%% -%% Finally, we can't possibly hope to make transactions atomic across -%% mirror queues, and it's not even clear that that's desirable: if a -%% slave fails whilst there's an open transaction in progress then -%% when the channel comes to commit the txn, it will detect the -%% failure and destroy the channel. However, the txn will have -%% actually committed successfully in all the other mirrors (including -%% master). To do this bit properly would require 2PC and all the -%% baggage that goes with that. %% --------------------------------------------------------------------------- %% Backing queue -- cgit v1.2.1 From 2fcc6f2cd5fc580dad0bd6419e311ad957bb29b7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 10 Apr 2011 13:05:36 +0100 Subject: Work on documentation of ha --- src/rabbit_mirror_queue_coordinator.erl | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 7e521e49..05e4a808 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -210,6 +210,17 @@ %% master). To do this bit properly would require 2PC and all the %% baggage that goes with that. %% +%% Recovery of mirrored queues is straightforward: as nodes die, the +%% remaining nodes record this, and eventually a situation is reached +%% in which only one node is alive, which is the master. This is the +%% only node which, upon recovery, will resurrect a mirrored queue: +%% nodes which die and then rejoin as a slave will start off empty as +%% if they have no mirrored content at all. This is not surprising: to +%% achieve anything more sophisticated would require the master and +%% recovering slave to be able to check to see whether they agree on +%% the last seen state of the queue: checking length alone is not +%% sufficient in this case. +%% %%---------------------------------------------------------------------------- start_link(Queue, GM) -> -- cgit v1.2.1 From 9318a445bc0b501c606899988d8d0d60d21ede54 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 12:16:49 +0100 Subject: Only copy routes that aren't already there. 
--- src/rabbit_binding.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index a8837e30..3c835b56 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -97,7 +97,9 @@ recover(XNames, QNames) -> XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), rabbit_misc:table_filter( - fun (_Route) -> true end, + fun (Route) -> + mnesia:read({rabbit_semi_durable_route, Route}) =:= [] + end, fun (Route, true) -> ok = mnesia:write(rabbit_semi_durable_route, Route, write); (_Route, false) -> -- cgit v1.2.1 From 90629640580f5ec1300ef52db73231696b885e39 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 12:38:39 +0100 Subject: Remove delivery tag. --- src/rabbit_channel.erl | 4 ++-- src/rabbit_trace.erl | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 6ec2a09f..8a234c0f 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -280,7 +280,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - rabbit_trace:tap_trace_out(Msg, DeliveryTag, ConsumerTag), + rabbit_trace:tap_trace_out(Msg, ConsumerTag), noreply(State1#ch{next_tag = DeliveryTag + 1}); handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> @@ -673,7 +673,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), - rabbit_trace:tap_trace_out(Msg, DeliveryTag, none), + rabbit_trace:tap_trace_out(Msg, none), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 2ef28be8..6163d14a 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,7 +16,7 @@ -module(rabbit_trace). --export([tap_trace_in/1, tap_trace_out/3]). +-export([tap_trace_in/1, tap_trace_out/2]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). @@ -25,10 +25,8 @@ -ifdef(use_specs). --type(delivery_tag() :: pos_integer()). - -spec(tap_trace_in/1 :: (rabbit_types:basic_message()) -> 'ok'). --spec(tap_trace_out/3 :: (rabbit_amqqueue:qmsg(), delivery_tag(), +-spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), rabbit_types:maybe(rabbit_types:ctag())) -> 'ok'). -endif. @@ -51,7 +49,6 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, Message = #basic_message{ exchange_name = #resource{virtual_host = VHostBin, name = XNameBin}}}, - DeliveryTag, ConsumerTagOrNone) -> check_trace( XNameBin, @@ -59,8 +56,7 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, {EncodedMetadata, Payload} = message_to_table(Message), - Fields0 = [{<<"delivery_tag">>, signedint, DeliveryTag}, - {<<"redelivered">>, signedint, RedeliveredNum}] + Fields0 = [{<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, Fields = case ConsumerTagOrNone of none -> Fields0; -- cgit v1.2.1 From a35d2e51754efed7458c54213e5df92248c1b43f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:14:17 +0100 Subject: Convert properties to table in a slightly smarter way. Also refer to the result as "properties" not "headers" since that's what it is. 
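The smarter conversion below leans on record_info(fields, 'P_basic') giving the field names in positional order, so a single fold with a running element/2 index can turn the record into an AMQP table while dropping undefined fields. A standalone sketch of the same fold over an invented record (not the real #'P_basic'{}):

    -module(record_to_table_sketch).
    -export([to_table/1]).

    -record(props, {content_type, priority, headers}).

    to_table(Props = #props{}) ->
        {Table, _FinalIx} =
            lists:foldl(
              fun (FieldName, {Acc, Ix}) ->
                      case element(Ix, Props) of
                          undefined -> {Acc, Ix + 1};
                          Value     -> {[{atom_to_binary(FieldName, utf8),
                                          type(FieldName, Value), Value} | Acc],
                                        Ix + 1}
                      end
              end,
              {[], 2},                       %% element 1 is the record tag
              record_info(fields, props)),
        lists:reverse(Table).

    type(headers, _V)              -> table;
    type(_K, V) when is_integer(V) -> signedint;
    type(_K, _V)                   -> longstr.

For instance, to_table(#props{content_type = <<"text/plain">>, priority = 1}) evaluates to [{<<"content_type">>, longstr, <<"text/plain">>}, {<<"priority">>, signedint, 1}], with the undefined headers field skipped.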
--- src/rabbit_trace.erl | 51 +++++++++++++++++++-------------------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 6163d14a..eee03165 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -86,41 +86,28 @@ publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> message_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, content = Content}) -> - #content{properties = #'P_basic'{content_type = ContentType, - content_encoding = ContentEncoding, - headers = Headers, - delivery_mode = DeliveryMode, - priority = Priority, - correlation_id = CorrelationId, - reply_to = ReplyTo, - expiration = Expiration, - message_id = MessageId, - timestamp = Timestamp, - type = Type, - user_id = UserId, - app_id = AppId}, + #content{properties = Props, payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), - Headers1 = prune_undefined( - [{<<"content_type">>, longstr, ContentType}, - {<<"content_encoding">>, longstr, ContentEncoding}, - {<<"headers">>, table, Headers}, - {<<"delivery_mode">>, signedint, DeliveryMode}, - {<<"priority">>, signedint, Priority}, - {<<"correlation_id">>, longstr, CorrelationId}, - {<<"reply_to">>, longstr, ReplyTo}, - {<<"expiration">>, longstr, Expiration}, - {<<"message_id">>, longstr, MessageId}, - {<<"timestamp">>, longstr, Timestamp}, - {<<"type">>, longstr, Type}, - {<<"user_id">>, longstr, UserId}, - {<<"app_id">>, longstr, AppId}]), + {PropsTable, _Ix} = + lists:foldl( + fun (K, {L, Ix}) -> + V = element(Ix, Props), + NewL = case V of + undefined -> L; + _ -> [{a2b(K), type(K, V), V}|L] + end, + {NewL, Ix + 1} + end, {[], 2}, record_info(fields, 'P_basic')), {[{<<"exchange_name">>, longstr, XName}, {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"headers">>, table, Headers1}, - {<<"node">>, longstr, list_to_binary(atom_to_list(node()))}], + {<<"properties">>, table, PropsTable}, + {<<"node">>, longstr, a2b(node())}], list_to_binary(lists:reverse(PFR))}. -prune_undefined(Fields) -> - [F || F = {_, _, Value} <- Fields, - Value =/= undefined]. +a2b(A) -> + list_to_binary(atom_to_list(A)). + +type(headers, _V) -> table; +type(_K, V) when is_integer(V) -> signedint; +type(_K, _V) -> longstr. -- cgit v1.2.1 From 6b4a7674c47bf6ec1df20eb428bcfe2916c26b7d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:21:38 +0100 Subject: Add user to trace messages. 
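The change below threads the channel's user record into the tracer and prepends a username header. For reference, the headers emitted here are plain AMQP field-table triples of {Name, Type, Value}; a hand-rolled illustration with a trimmed-down key set (not the broker's full list):

-module(trace_headers).
-export([fields/2]).

fields(Username, Redelivered) ->
    [{<<"username">>,    longstr,   Username},
     {<<"redelivered">>, signedint, case Redelivered of
                                        true  -> 1;
                                        false -> 0
                                    end},
     %% node() is an atom; field-table strings are binaries
     {<<"node">>,        longstr,   list_to_binary(atom_to_list(node()))}].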
--- src/rabbit_channel.erl | 15 +++++++++------ src/rabbit_trace.erl | 28 +++++++++++++++++----------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index bfd779ee..b9782b2b 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -264,7 +264,8 @@ handle_cast({deliver, ConsumerTag, AckRequired, routing_keys = [RoutingKey | _CcRoutes], content = Content}}}, State = #ch{writer_pid = WriterPid, - next_tag = DeliveryTag}) -> + next_tag = DeliveryTag, + user = User}) -> State1 = lock_message(AckRequired, ack_record(DeliveryTag, ConsumerTag, Msg), State), @@ -281,7 +282,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - rabbit_trace:tap_trace_out(Msg, ConsumerTag), + rabbit_trace:tap_trace_out(Msg, ConsumerTag, User), noreply(State1#ch{next_tag = DeliveryTag + 1}); handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> @@ -588,7 +589,8 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, transaction_id = TxnKey, - confirm_enabled = ConfirmEnabled}) -> + confirm_enabled = ConfirmEnabled, + user = User}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_write_permitted(ExchangeName, State), Exchange = rabbit_exchange:lookup_or_die(ExchangeName), @@ -605,7 +607,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> - rabbit_trace:tap_trace_in(Message), + rabbit_trace:tap_trace_in(Message, User), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -655,7 +657,8 @@ handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, _, State = #ch{writer_pid = WriterPid, conn_pid = ConnPid, - next_tag = DeliveryTag}) -> + next_tag = DeliveryTag, + user = User}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( @@ -674,7 +677,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), - rabbit_trace:tap_trace_out(Msg, none), + rabbit_trace:tap_trace_out(Msg, none, User), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index eee03165..6dac3cc9 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,7 +16,7 @@ -module(rabbit_trace). --export([tap_trace_in/1, tap_trace_out/2]). +-export([tap_trace_in/2, tap_trace_out/3]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). @@ -25,9 +25,11 @@ -ifdef(use_specs). --spec(tap_trace_in/1 :: (rabbit_types:basic_message()) -> 'ok'). --spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), - rabbit_types:maybe(rabbit_types:ctag())) -> 'ok'). +-spec(tap_trace_in/2 :: (rabbit_types:basic_message(), rabbit_types:user()) + -> 'ok'). +-spec(tap_trace_out/3 :: (rabbit_amqqueue:qmsg(), + rabbit_types:maybe(rabbit_types:ctag()), + rabbit_types:user()) -> 'ok'). -endif. 
@@ -35,12 +37,13 @@ tap_trace_in(Message = #basic_message{ exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}) -> + name = XNameBin}}, + User) -> check_trace( XNameBin, VHostBin, fun (TraceExchangeBin) -> - {EncodedMetadata, Payload} = message_to_table(Message), + {EncodedMetadata, Payload} = message_to_table(Message, User), publish(TraceExchangeBin, VHostBin, <<"publish">>, XNameBin, EncodedMetadata, Payload) end). @@ -49,13 +52,14 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, Message = #basic_message{ exchange_name = #resource{virtual_host = VHostBin, name = XNameBin}}}, - ConsumerTagOrNone) -> + ConsumerTagOrNone, + User) -> check_trace( XNameBin, VHostBin, fun (TraceExchangeBin) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - {EncodedMetadata, Payload} = message_to_table(Message), + {EncodedMetadata, Payload} = message_to_table(Message, User), Fields0 = [{<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, Fields = case ConsumerTagOrNone of @@ -84,8 +88,9 @@ publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> ok. message_to_table(#basic_message{exchange_name = #resource{name = XName}, - routing_keys = RoutingKeys, - content = Content}) -> + routing_keys = RoutingKeys, + content = Content}, + #user{username = Username}) -> #content{properties = Props, payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), @@ -99,7 +104,8 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, end, {NewL, Ix + 1} end, {[], 2}, record_info(fields, 'P_basic')), - {[{<<"exchange_name">>, longstr, XName}, + {[{<<"username">>, longstr, Username}, + {<<"exchange_name">>, longstr, XName}, {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, {<<"properties">>, table, PropsTable}, {<<"node">>, longstr, a2b(node())}], -- cgit v1.2.1 From 29ec840e5772e34d8edaa5d4a095a1a83247b6dd Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:23:41 +0100 Subject: Less magic. --- src/rabbit_trace.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 6dac3cc9..2fdf5c34 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -100,7 +100,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, V = element(Ix, Props), NewL = case V of undefined -> L; - _ -> [{a2b(K), type(K, V), V}|L] + _ -> [{a2b(K), type(V), V} | L] end, {NewL, Ix + 1} end, {[], 2}, record_info(fields, 'P_basic')), @@ -114,6 +114,6 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, a2b(A) -> list_to_binary(atom_to_list(A)). -type(headers, _V) -> table; -type(_K, V) when is_integer(V) -> signedint; -type(_K, _V) -> longstr. +type(V) when is_list(V) -> table; +type(V) when is_integer(V) -> signedint; +type(V) -> longstr. -- cgit v1.2.1 From b80191a7f7df5a3e9888faaaa7272a21507d4370 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:31:52 +0100 Subject: Cosmetic. 
--- src/rabbit_trace.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 2fdf5c34..eb25121b 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -37,8 +37,7 @@ tap_trace_in(Message = #basic_message{ exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}, - User) -> + name = XNameBin}}, User) -> check_trace( XNameBin, VHostBin, @@ -52,8 +51,7 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, Message = #basic_message{ exchange_name = #resource{virtual_host = VHostBin, name = XNameBin}}}, - ConsumerTagOrNone, - User) -> + ConsumerTagOrNone, User) -> check_trace( XNameBin, VHostBin, -- cgit v1.2.1 From 67c8514a974e0773c12c461fad00a72d9a47df0c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:37:09 +0100 Subject: Get rid of that "Bin" suffix, it was inconsistent and strings are generally binaries in the broker anyway. --- src/rabbit_trace.erl | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index eb25121b..a823879a 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -36,26 +36,26 @@ %%---------------------------------------------------------------------------- tap_trace_in(Message = #basic_message{ - exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}, User) -> + exchange_name = #resource{virtual_host = VHost, + name = XName}}, User) -> check_trace( - XNameBin, - VHostBin, - fun (TraceExchangeBin) -> + XName, + VHost, + fun (TraceExchange) -> {EncodedMetadata, Payload} = message_to_table(Message, User), - publish(TraceExchangeBin, VHostBin, <<"publish">>, XNameBin, + publish(TraceExchange, VHost, <<"publish">>, XName, EncodedMetadata, Payload) end). -tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, +tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Message = #basic_message{ - exchange_name = #resource{virtual_host = VHostBin, - name = XNameBin}}}, + exchange_name = #resource{virtual_host = VHost, + name = XName}}}, ConsumerTagOrNone, User) -> check_trace( - XNameBin, - VHostBin, - fun (TraceExchangeBin) -> + XName, + VHost, + fun (TraceExchange) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, {EncodedMetadata, Payload} = message_to_table(Message, User), Fields0 = [{<<"redelivered">>, signedint, RedeliveredNum}] @@ -65,22 +65,22 @@ tap_trace_out({#resource{name = QNameBin}, _QPid, _QMsgId, Redelivered, CTag -> [{<<"consumer_tag">>, longstr, CTag} | Fields0] end, - publish(TraceExchangeBin, VHostBin, <<"deliver">>, QNameBin, + publish(TraceExchange, VHost, <<"deliver">>, QName, Fields, Payload) end). -check_trace(XNameBin, VHostBin, F) -> - case catch case application:get_env(rabbit, {trace_exchange, VHostBin}) of - undefined -> ok; - {ok, XNameBin} -> ok; - {ok, TraceExchangeBin} -> F(TraceExchangeBin) +check_trace(XName, VHost, F) -> + case catch case application:get_env(rabbit, {trace_exchange, VHost}) of + undefined -> ok; + {ok, XName} -> ok; + {ok, TraceExchange} -> F(TraceExchange) end of {'EXIT', Reason} -> rabbit_log:info("Trace tap died: ~p~n", [Reason]); ok -> ok end. 
-publish(TraceExchangeBin, VHostBin, RKPrefix, RKSuffix, Table, Payload) -> - rabbit_basic:publish(rabbit_misc:r(VHostBin, exchange, TraceExchangeBin), +publish(TraceExchange, VHost, RKPrefix, RKSuffix, Table, Payload) -> + rabbit_basic:publish(rabbit_misc:r(VHost, exchange, TraceExchange), <>, #'P_basic'{headers = Table}, Payload), ok. -- cgit v1.2.1 From c78435a840ed2eb8643e683f1b574f3553e8b202 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 13:39:29 +0100 Subject: Cosmetic. --- src/rabbit_trace.erl | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index a823879a..c39a056c 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -39,8 +39,7 @@ tap_trace_in(Message = #basic_message{ exchange_name = #resource{virtual_host = VHost, name = XName}}, User) -> check_trace( - XName, - VHost, + XName, VHost, fun (TraceExchange) -> {EncodedMetadata, Payload} = message_to_table(Message, User), publish(TraceExchange, VHost, <<"publish">>, XName, @@ -53,8 +52,7 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, name = XName}}}, ConsumerTagOrNone, User) -> check_trace( - XName, - VHost, + XName, VHost, fun (TraceExchange) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, {EncodedMetadata, Payload} = message_to_table(Message, User), @@ -114,4 +112,4 @@ a2b(A) -> type(V) when is_list(V) -> table; type(V) when is_integer(V) -> signedint; -type(V) -> longstr. +type(_V) -> longstr. -- cgit v1.2.1 From 3091e5caab4e25e69b54fb4076b868bf15ace255 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 11 Apr 2011 14:44:34 +0100 Subject: Remove from all forward routing tables in remove_for_source. --- src/rabbit_binding.erl | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 3c835b56..016e8707 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -248,11 +248,16 @@ remove_for_source(SrcName) -> reverse_route(Route), write), ok = delete_forward_routes(Route), Route#route.binding - end || Route <- mnesia:match_object( - rabbit_route, - #route{binding = #binding{source = SrcName, - _ = '_'}}, - write)]. + end || Route <- sets:to_list( + sets:union( + [sets:from_list(routes_for_source(SrcName, T)) || + T <- [rabbit_route, rabbit_semi_durable_route, + rabbit_durable_route]]))]. + +routes_for_source(SrcName, Table) -> + mnesia:match_object(Table, #route{binding = #binding{source = SrcName, + _ = '_'}}, + write). remove_for_destination(DstName) -> remove_for_destination(DstName, fun delete_forward_routes/1). -- cgit v1.2.1 From ce79d7fc203c13b7c91c85421962a6c741749cf2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 12 Apr 2011 10:49:30 +0100 Subject: Be more specific about what we catch. --- src/rabbit_trace.erl | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index c39a056c..1e3553cb 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -68,13 +68,15 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, end). 
check_trace(XName, VHost, F) -> - case catch case application:get_env(rabbit, {trace_exchange, VHost}) of - undefined -> ok; - {ok, XName} -> ok; - {ok, TraceExchange} -> F(TraceExchange) - end of - {'EXIT', Reason} -> rabbit_log:info("Trace tap died: ~p~n", [Reason]); - ok -> ok + case application:get_env(rabbit, {trace_exchange, VHost}) of + undefined -> ok; + {ok, XName} -> ok; + {ok, TraceX} -> case catch F(TraceX) of + {'EXIT', Reason} -> rabbit_log:info( + "Trace tap died: ~p~n", + [Reason]); + ok -> ok + end end. publish(TraceExchange, VHost, RKPrefix, RKSuffix, Table, Payload) -> -- cgit v1.2.1 From 766ba5e13d1fdaa4cb0f81ab521d8c4de14af9cb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 12 Apr 2011 10:53:09 +0100 Subject: Remove user and consumer tag. --- src/rabbit_channel.erl | 15 ++++++--------- src/rabbit_trace.erl | 33 +++++++++++---------------------- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index b9782b2b..62541536 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -264,8 +264,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, routing_keys = [RoutingKey | _CcRoutes], content = Content}}}, State = #ch{writer_pid = WriterPid, - next_tag = DeliveryTag, - user = User}) -> + next_tag = DeliveryTag}) -> State1 = lock_message(AckRequired, ack_record(DeliveryTag, ConsumerTag, Msg), State), @@ -282,7 +281,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - rabbit_trace:tap_trace_out(Msg, ConsumerTag, User), + rabbit_trace:tap_trace_out(Msg), noreply(State1#ch{next_tag = DeliveryTag + 1}); handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> @@ -589,8 +588,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, transaction_id = TxnKey, - confirm_enabled = ConfirmEnabled, - user = User}) -> + confirm_enabled = ConfirmEnabled}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_write_permitted(ExchangeName, State), Exchange = rabbit_exchange:lookup_or_die(ExchangeName), @@ -607,7 +605,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> - rabbit_trace:tap_trace_in(Message, User), + rabbit_trace:tap_trace_in(Message), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -657,8 +655,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, _, State = #ch{writer_pid = WriterPid, conn_pid = ConnPid, - next_tag = DeliveryTag, - user = User}) -> + next_tag = DeliveryTag}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( @@ -677,7 +674,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), - rabbit_trace:tap_trace_out(Msg, none, User), + rabbit_trace:tap_trace_out(Msg), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 1e3553cb..2e16b5d0 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,7 +16,7 @@ -module(rabbit_trace). --export([tap_trace_in/2, tap_trace_out/3]). +-export([tap_trace_in/1, tap_trace_out/1]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). @@ -25,11 +25,8 @@ -ifdef(use_specs). 
--spec(tap_trace_in/2 :: (rabbit_types:basic_message(), rabbit_types:user()) - -> 'ok'). --spec(tap_trace_out/3 :: (rabbit_amqqueue:qmsg(), - rabbit_types:maybe(rabbit_types:ctag()), - rabbit_types:user()) -> 'ok'). +-spec(tap_trace_in/1 :: (rabbit_types:basic_message()) -> 'ok'). +-spec(tap_trace_out/1 :: (rabbit_amqqueue:qmsg()) -> 'ok'). -endif. @@ -37,11 +34,11 @@ tap_trace_in(Message = #basic_message{ exchange_name = #resource{virtual_host = VHost, - name = XName}}, User) -> + name = XName}}) -> check_trace( XName, VHost, fun (TraceExchange) -> - {EncodedMetadata, Payload} = message_to_table(Message, User), + {EncodedMetadata, Payload} = message_to_table(Message), publish(TraceExchange, VHost, <<"publish">>, XName, EncodedMetadata, Payload) end). @@ -49,20 +46,14 @@ tap_trace_in(Message = #basic_message{ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Message = #basic_message{ exchange_name = #resource{virtual_host = VHost, - name = XName}}}, - ConsumerTagOrNone, User) -> + name = XName}}}) -> check_trace( XName, VHost, fun (TraceExchange) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - {EncodedMetadata, Payload} = message_to_table(Message, User), - Fields0 = [{<<"redelivered">>, signedint, RedeliveredNum}] + {EncodedMetadata, Payload} = message_to_table(Message), + Fields = [{<<"redelivered">>, signedint, RedeliveredNum}] ++ EncodedMetadata, - Fields = case ConsumerTagOrNone of - none -> Fields0; - CTag -> [{<<"consumer_tag">>, longstr, CTag} | - Fields0] - end, publish(TraceExchange, VHost, <<"deliver">>, QName, Fields, Payload) end). @@ -86,9 +77,8 @@ publish(TraceExchange, VHost, RKPrefix, RKSuffix, Table, Payload) -> ok. message_to_table(#basic_message{exchange_name = #resource{name = XName}, - routing_keys = RoutingKeys, - content = Content}, - #user{username = Username}) -> + routing_keys = RoutingKeys, + content = Content}) -> #content{properties = Props, payload_fragments_rev = PFR} = rabbit_binary_parser:ensure_content_decoded(Content), @@ -102,8 +92,7 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, end, {NewL, Ix + 1} end, {[], 2}, record_info(fields, 'P_basic')), - {[{<<"username">>, longstr, Username}, - {<<"exchange_name">>, longstr, XName}, + {[{<<"exchange_name">>, longstr, XName}, {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, {<<"properties">>, table, PropsTable}, {<<"node">>, longstr, a2b(node())}], -- cgit v1.2.1 From 115aef51798825ea76fdc2441ef0ed2b16991edb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 12 Apr 2011 10:56:08 +0100 Subject: Cosmetic. --- src/rabbit_trace.erl | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 2e16b5d0..f03903c5 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -63,10 +63,9 @@ check_trace(XName, VHost, F) -> undefined -> ok; {ok, XName} -> ok; {ok, TraceX} -> case catch F(TraceX) of - {'EXIT', Reason} -> rabbit_log:info( - "Trace tap died: ~p~n", - [Reason]); - ok -> ok + {'EXIT', R} -> rabbit_log:info( + "Trace tap died: ~p~n", [R]); + ok -> ok end end. -- cgit v1.2.1 From e326e7b7b90f232ef0c9d0e5875cf793d2175a9b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 13 Apr 2011 12:09:54 +0100 Subject: Undo merge from bug24038 which has now become INVALID. 
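The revert below reinstates the inline timer management in both the queue process and the slave. Reduced to a standalone sketch, the idiom is: keep the timer reference in the process state, arm the timer only when it is not already armed, and cancel idempotently. Interval, state record and the timer's callback are placeholders here, and the extra 'just_measured' state used by the rate timer in the real code is omitted:

-module(timer_idiom).
-export([ensure_timer/1, stop_timer/1]).

-define(INTERVAL, 25). %% milliseconds

-record(state, {timer_ref = undefined}).

ensure_timer(State = #state{timer_ref = undefined}) ->
    {ok, TRef} = timer:apply_after(?INTERVAL, io, format, ["tick~n"]),
    State#state{timer_ref = TRef};
ensure_timer(State) ->
    State.

stop_timer(State = #state{timer_ref = undefined}) ->
    State;
stop_timer(State = #state{timer_ref = TRef}) ->
    {ok, cancel} = timer:cancel(TRef),
    State#state{timer_ref = undefined}.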
--- src/rabbit_amqqueue_process.erl | 46 ++++++++++------ src/rabbit_amqqueue_process_utils.erl | 99 ----------------------------------- src/rabbit_mirror_queue_slave.erl | 47 +++++++++++------ 3 files changed, 59 insertions(+), 133 deletions(-) delete mode 100644 src/rabbit_amqqueue_process_utils.erl diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 3bcdf706..53bdd3b2 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -21,6 +21,8 @@ -behaviour(gen_server2). -define(UNSENT_MESSAGE_LIMIT, 100). +-define(SYNC_INTERVAL, 25). %% milliseconds +-define(RAM_DURATION_UPDATE_INTERVAL, 5000). -define(BASE_MESSAGE_PROPERTIES, #message_properties{expiry = undefined, needs_confirming = false}). @@ -260,27 +262,37 @@ backing_queue_module(#amqqueue{arguments = Args}) -> _Nodes -> rabbit_mirror_queue_master end. +ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> + {ok, TRef} = timer:apply_after( + ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + State#q{sync_timer_ref = TRef}; ensure_sync_timer(State) -> - rabbit_amqqueue_process_utils:ensure_sync_timer( - fun sync_timer_getter/1, fun sync_timer_setter/2, State). - -stop_sync_timer(State) -> - rabbit_amqqueue_process_utils:stop_sync_timer( - fun sync_timer_getter/1, fun sync_timer_setter/2, State). - -sync_timer_getter(State) -> State#q.sync_timer_ref. -sync_timer_setter(Timer, State) -> State#q{sync_timer_ref = Timer}. + State. +stop_sync_timer(State = #q{sync_timer_ref = undefined}) -> + State; +stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#q{sync_timer_ref = undefined}. + +ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> + {ok, TRef} = timer:apply_after( + ?RAM_DURATION_UPDATE_INTERVAL, + rabbit_amqqueue, update_ram_duration, + [self()]), + State#q{rate_timer_ref = TRef}; +ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) -> + State#q{rate_timer_ref = undefined}; ensure_rate_timer(State) -> - rabbit_amqqueue_process_utils:ensure_rate_timer( - fun rate_timer_getter/1, fun rate_timer_setter/2, State). - -stop_rate_timer(State) -> - rabbit_amqqueue_process_utils:stop_rate_timer( - fun rate_timer_getter/1, fun rate_timer_setter/2, State). + State. -rate_timer_getter(State) -> State#q.rate_timer_ref. -rate_timer_setter(Timer, State) -> State#q{rate_timer_ref = Timer}. +stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> + State; +stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> + State#q{rate_timer_ref = undefined}; +stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> + {ok, cancel} = timer:cancel(TRef), + State#q{rate_timer_ref = undefined}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; diff --git a/src/rabbit_amqqueue_process_utils.erl b/src/rabbit_amqqueue_process_utils.erl deleted file mode 100644 index feb2a79c..00000000 --- a/src/rabbit_amqqueue_process_utils.erl +++ /dev/null @@ -1,99 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License at -%% http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -%% License for the specific language governing rights and limitations -%% under the License. -%% -%% The Original Code is RabbitMQ. 
-%% -%% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 201-2011 VMware, Inc. All rights reserved. -%% - --module(rabbit_amqqueue_process_utils). - --define(SYNC_INTERVAL, 25). %% milliseconds --define(RAM_DURATION_UPDATE_INTERVAL, 5000). - --export([backing_queue_pre_hibernate/2, - ensure_sync_timer/3, stop_sync_timer/3, - ensure_rate_timer/3, stop_rate_timer/3]). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --type(bq_mod() :: atom()). --type(bq_state() :: any()). %% A good example of dialyzer's shortcomings - --type(queue_state() :: any()). %% Another such example. --type(getter(A) :: fun ((queue_state()) -> A)). --type(setter(A) :: fun ((A, queue_state()) -> queue_state())). - --type(tref() :: term()). %% Sigh. According to timer docs. - --spec(backing_queue_pre_hibernate/2 :: (bq_mod(), bq_state()) -> bq_state()). - --spec(ensure_sync_timer/3 :: (getter('undefined'|tref()), - setter('undefined'|tref()), - queue_state()) -> queue_state()). --spec(stop_sync_timer/3 :: (getter('undefined'|tref()), - setter('undefined'|tref()), - queue_state()) -> queue_state()). - --spec(ensure_rate_timer/3 :: (getter('undefined'|'just_measured'|tref()), - setter('undefined'|'just_measured'|tref()), - queue_state()) -> queue_state()). --spec(stop_rate_timer/3 :: (getter('undefined'|'just_measured'|tref()), - setter('undefined'|'just_measured'|tref()), - queue_state()) -> queue_state()). - --endif. - -%%---------------------------------------------------------------------------- - -backing_queue_pre_hibernate(BQ, BQS) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - BQ:handle_pre_hibernate(BQS2). - -ensure_sync_timer(Getter, Setter, State) -> - case Getter(State) of - undefined -> {ok, TRef} = timer:apply_after( - ?SYNC_INTERVAL, rabbit_amqqueue, - sync_timeout, [self()]), - Setter(TRef, State); - _TRef -> State - end. - -stop_sync_timer(Getter, Setter, State) -> - case Getter(State) of - undefined -> State; - TRef -> {ok, cancel} = timer:cancel(TRef), - Setter(undefined, State) - end. - -ensure_rate_timer(Getter, Setter, State) -> - case Getter(State) of - undefined -> {ok, TRef} = - timer:apply_after( - ?RAM_DURATION_UPDATE_INTERVAL, rabbit_amqqueue, - update_ram_duration, [self()]), - Setter(TRef, State); - just_measured -> Setter(undefined, State); - _TRef -> State - end. - -stop_rate_timer(Getter, Setter, State) -> - case Getter(State) of - undefined -> State; - just_measured -> Setter(undefined, State); - TRef -> {ok, cancel} = timer:cancel(TRef), - Setter(undefined, State) - end. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 89b8971c..e3cfe54d 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -47,6 +47,9 @@ -include("rabbit.hrl"). -include("gm_specs.hrl"). +-define(SYNC_INTERVAL, 25). %% milliseconds +-define(RAM_DURATION_UPDATE_INTERVAL, 5000). + -record(state, { q, gm, master_node, @@ -478,27 +481,37 @@ next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) -> backing_queue_idle_timeout(State = #state { backing_queue = BQ }) -> run_backing_queue(BQ, fun (M, BQS) -> M:idle_timeout(BQS) end, State). 
+ensure_sync_timer(State = #state { sync_timer_ref = undefined }) -> + {ok, TRef} = timer:apply_after( + ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + State #state { sync_timer_ref = TRef }; ensure_sync_timer(State) -> - rabbit_amqqueue_process_utils:ensure_sync_timer( - fun sync_timer_getter/1, fun sync_timer_setter/2, State). - -stop_sync_timer(State) -> - rabbit_amqqueue_process_utils:stop_sync_timer( - fun sync_timer_getter/1, fun sync_timer_setter/2, State). - -sync_timer_getter(State) -> State#state.sync_timer_ref. -sync_timer_setter(Timer, State) -> State#state{sync_timer_ref = Timer}. + State. +stop_sync_timer(State = #state { sync_timer_ref = undefined }) -> + State; +stop_sync_timer(State = #state { sync_timer_ref = TRef }) -> + {ok, cancel} = timer:cancel(TRef), + State #state { sync_timer_ref = undefined }. + +ensure_rate_timer(State = #state { rate_timer_ref = undefined }) -> + {ok, TRef} = timer:apply_after( + ?RAM_DURATION_UPDATE_INTERVAL, + rabbit_amqqueue, update_ram_duration, + [self()]), + State #state { rate_timer_ref = TRef }; +ensure_rate_timer(State = #state { rate_timer_ref = just_measured }) -> + State #state { rate_timer_ref = undefined }; ensure_rate_timer(State) -> - rabbit_amqqueue_process_utils:ensure_rate_timer( - fun rate_timer_getter/1, fun rate_timer_setter/2, State). - -stop_rate_timer(State) -> - rabbit_amqqueue_process_utils:stop_rate_timer( - fun rate_timer_getter/1, fun rate_timer_setter/2, State). + State. -rate_timer_getter(State) -> State#state.rate_timer_ref. -rate_timer_setter(Timer, State) -> State#state{rate_timer_ref = Timer}. +stop_rate_timer(State = #state { rate_timer_ref = undefined }) -> + State; +stop_rate_timer(State = #state { rate_timer_ref = just_measured }) -> + State #state { rate_timer_ref = undefined }; +stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> + {ok, cancel} = timer:cancel(TRef), + State #state { rate_timer_ref = undefined }. maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, -- cgit v1.2.1 From 4d454841c1777ae9f7680448f37f1a8cf1f06e2e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 14:38:15 +0100 Subject: -sync_binding(Binding, SrcDurable, DstDurable, Fun) --- src/rabbit_binding.erl | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 016e8707..7fdda9c1 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -149,9 +149,9 @@ add(Binding, InnerFun) -> end). add(Src, Dst, B) -> - Durable = all_durable([Src, Dst]), + Durable = durable(Src) andalso durable(Dst), case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> ok = sync_binding(B, Durable, durable(Dst), + true -> ok = sync_binding(B, durable(Src), durable(Dst), fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback(Src, add_binding, [Tx, Src, B]), @@ -177,7 +177,7 @@ remove(Binding, InnerFun) -> end). remove(Src, Dst, B) -> - ok = sync_binding(B, all_durable([Src, Dst]), durable(Dst), + ok = sync_binding(B, durable(Src), durable(Dst), fun mnesia:delete_object/3), Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()), fun (Tx) -> ok = process_deletions(Deletions, Tx) end. @@ -267,8 +267,6 @@ remove_transient_for_destination(DstName) -> %%---------------------------------------------------------------------------- -all_durable(Resources) -> lists:all(fun durable/1, Resources). 
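The clause cascade below stores a binding at every durability level that applies: durable only when both endpoints are durable, semi-durable when the destination is durable, transient always. A toy version of that cascade, with the table writes replaced by a print — an illustration of the control flow, not the real rabbit_binding code:

-module(durability_cascade).
-export([store/3]).

store(Route, true, true) ->
    persist(durable, Route),
    store(Route, false, true);
store(Route, false, true) ->
    persist(semi_durable, Route),
    store(Route, false, false);
store(Route, _SrcDurable, false) ->
    persist(transient, Route),
    ok.

persist(Level, Route) ->
    %% stand-in for the mnesia writes in the real code
    io:format("storing ~p route: ~p~n", [Level, Route]).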
- durable(#exchange{durable = D}) -> D; durable(#amqqueue{durable = D}) -> D. @@ -282,15 +280,16 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -sync_binding(Binding, true, SemiDurable, Fun) -> +%% (Binding, SrcDurable, DstDurable, Fun) +sync_binding(Binding, true, true, Fun) -> Fun(rabbit_durable_route, #route{binding = Binding}, write), - sync_binding(Binding, false, SemiDurable, Fun); + sync_binding(Binding, false, true, Fun); sync_binding(Binding, false, true, Fun) -> Fun(rabbit_semi_durable_route, #route{binding = Binding}, write), sync_binding(Binding, false, false, Fun); -sync_binding(Binding, false, false, Fun) -> +sync_binding(Binding, _SrcDurable, false, Fun) -> sync_transient_binding(Binding, Fun). sync_transient_binding(Binding, Fun) -> -- cgit v1.2.1 From a58a73ddb279c0a9f0adcc7228bf3f739f2a5d54 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 14:48:15 +0100 Subject: Cosmetic. --- src/rabbit_binding.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7fdda9c1..57c766c7 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -239,8 +239,8 @@ has_for_source(SrcName) -> %% we need to check for durable routes here too in case a bunch of %% routes to durable queues have been removed temporarily as a %% result of a node failure - contains(rabbit_route, Match) orelse contains(rabbit_semi_durable_route, - Match). + contains(rabbit_route, Match) orelse + contains(rabbit_semi_durable_route, Match). remove_for_source(SrcName) -> [begin -- cgit v1.2.1 From 054a16a7a73e9fce43959b353a7eb842dd3faab3 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 14:48:25 +0100 Subject: Simpler remove_for_source. --- src/rabbit_binding.erl | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 57c766c7..1cb642a7 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -243,21 +243,16 @@ has_for_source(SrcName) -> contains(rabbit_semi_durable_route, Match). remove_for_source(SrcName) -> + Match = #route{binding = #binding{source = SrcName, _ = '_'}}, + Routes = lists:usort( + mnesia:match_object(rabbit_route, Match, write) ++ + mnesia:match_object(rabbit_durable_route, Match, write)), [begin ok = mnesia:delete_object(rabbit_reverse_route, reverse_route(Route), write), ok = delete_forward_routes(Route), Route#route.binding - end || Route <- sets:to_list( - sets:union( - [sets:from_list(routes_for_source(SrcName, T)) || - T <- [rabbit_route, rabbit_semi_durable_route, - rabbit_durable_route]]))]. - -routes_for_source(SrcName, Table) -> - mnesia:match_object(Table, #route{binding = #binding{source = SrcName, - _ = '_'}}, - write). + end || Route <- Routes]. remove_for_destination(DstName) -> remove_for_destination(DstName, fun delete_forward_routes/1). -- cgit v1.2.1 From 2636afcb14e9d964342c960fe2ceb7e92feba782 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 14:52:05 +0100 Subject: Oops. 
--- src/rabbit_binding.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1cb642a7..f438bd09 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -277,11 +277,11 @@ binding_action(Binding = #binding{source = SrcName, %% (Binding, SrcDurable, DstDurable, Fun) sync_binding(Binding, true, true, Fun) -> - Fun(rabbit_durable_route, #route{binding = Binding}, write), + ok = Fun(rabbit_durable_route, #route{binding = Binding}, write), sync_binding(Binding, false, true, Fun); sync_binding(Binding, false, true, Fun) -> - Fun(rabbit_semi_durable_route, #route{binding = Binding}, write), + ok = Fun(rabbit_semi_durable_route, #route{binding = Binding}, write), sync_binding(Binding, false, false, Fun); sync_binding(Binding, _SrcDurable, false, Fun) -> -- cgit v1.2.1 From cb7db058ed96b902891b0af91237b71e7b54acd4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 15:12:46 +0100 Subject: sync_binding -> sync_route, simplify. --- src/rabbit_binding.erl | 62 ++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 37 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index f438bd09..2c9c316b 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -115,7 +115,7 @@ recover(XNames, QNames) -> end, fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> case Tx of - true -> ok = sync_transient_binding(R, fun mnesia:write/3); + true -> ok = sync_transient_route(R, fun mnesia:write/3); false -> ok end, {ok, X} = rabbit_exchange:lookup(Src), @@ -151,8 +151,8 @@ add(Binding, InnerFun) -> add(Src, Dst, B) -> Durable = durable(Src) andalso durable(Dst), case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> ok = sync_binding(B, durable(Src), durable(Dst), - fun mnesia:write/3), + true -> ok = sync_route(#route{binding = B}, durable(Src), + durable(Dst), fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback(Src, add_binding, [Tx, Src, B]), rabbit_event:notify_if(not Tx, binding_created, @@ -177,8 +177,8 @@ remove(Binding, InnerFun) -> end). remove(Src, Dst, B) -> - ok = sync_binding(B, durable(Src), durable(Dst), - fun mnesia:delete_object/3), + ok = sync_route(#route{binding = B}, durable(Src), durable(Dst), + fun mnesia:delete_object/3), Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()), fun (Tx) -> ok = process_deletions(Deletions, Tx) end. @@ -248,17 +248,17 @@ remove_for_source(SrcName) -> mnesia:match_object(rabbit_route, Match, write) ++ mnesia:match_object(rabbit_durable_route, Match, write)), [begin - ok = mnesia:delete_object(rabbit_reverse_route, - reverse_route(Route), write), - ok = delete_forward_routes(Route), + sync_route(Route, fun mnesia:delete_object/3), Route#route.binding end || Route <- Routes]. -remove_for_destination(DstName) -> - remove_for_destination(DstName, fun delete_forward_routes/1). +remove_for_destination(Dst) -> + remove_for_destination( + Dst, fun (R) -> sync_route(R, fun mnesia:delete_object/3) end). -remove_transient_for_destination(DstName) -> - remove_for_destination(DstName, fun delete_transient_forward_routes/1). +remove_transient_for_destination(Dst) -> + remove_for_destination( + Dst, fun (R) -> sync_transient_route(R, fun mnesia:delete_object/3) end). 
%%---------------------------------------------------------------------------- @@ -275,20 +275,22 @@ binding_action(Binding = #binding{source = SrcName, Fun(Src, Dst, Binding#binding{args = SortedArgs}) end). -%% (Binding, SrcDurable, DstDurable, Fun) -sync_binding(Binding, true, true, Fun) -> - ok = Fun(rabbit_durable_route, #route{binding = Binding}, write), - sync_binding(Binding, false, true, Fun); +sync_route(R, Fun) -> sync_route(R, true, true, Fun). -sync_binding(Binding, false, true, Fun) -> - ok = Fun(rabbit_semi_durable_route, #route{binding = Binding}, write), - sync_binding(Binding, false, false, Fun); +%% (Route, SrcDurable, DstDurable, Fun) +sync_route(Route, true, true, Fun) -> + ok = Fun(rabbit_durable_route, Route, write), + sync_route(Route, false, true, Fun); -sync_binding(Binding, _SrcDurable, false, Fun) -> - sync_transient_binding(Binding, Fun). +sync_route(Route, false, true, Fun) -> + ok = Fun(rabbit_semi_durable_route, Route, write), + sync_route(Route, false, false, Fun); -sync_transient_binding(Binding, Fun) -> - {Route, ReverseRoute} = route_with_reverse(Binding), +sync_route(Route, _SrcDurable, false, Fun) -> + sync_transient_route(Route, Fun). + +sync_transient_route(Route, Fun) -> + ReverseRoute = reverse_route(Route), ok = Fun(rabbit_route, Route, write), ok = Fun(rabbit_reverse_route, ReverseRoute, write). @@ -365,20 +367,6 @@ maybe_auto_delete(XName, Bindings, Deletions) -> end, add_deletion(XName, Entry, Deletions1). -delete_forward_routes(Route) -> - ok = mnesia:delete_object(rabbit_route, Route, write), - ok = mnesia:delete_object(rabbit_semi_durable_route, Route, write), - ok = mnesia:delete_object(rabbit_durable_route, Route, write). - -delete_transient_forward_routes(Route) -> - ok = mnesia:delete_object(rabbit_route, Route, write). - -route_with_reverse(#route{binding = Binding}) -> - route_with_reverse(Binding); -route_with_reverse(Binding = #binding{}) -> - Route = #route{binding = Binding}, - {Route, reverse_route(Route)}. - reverse_route(#route{binding = Binding}) -> #reverse_route{reverse_binding = reverse_binding(Binding)}; -- cgit v1.2.1 From 65dab4241fb3edf8b1fd8da8fe9d5536623e6184 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 13 Apr 2011 15:27:29 +0100 Subject: Forgot to undo these bits, and the boot sequence has been changed, so debitrot --- src/rabbit_amqqueue_process.erl | 8 ++++++-- src/rabbit_mirror_queue_slave.erl | 8 ++++++-- src/rabbit_mirror_queue_slave_sup.erl | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 53bdd3b2..a8b19b72 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -1234,11 +1234,15 @@ handle_pre_hibernate(State = #q{backing_queue_state = undefined}) -> handle_pre_hibernate(State = #q{backing_queue = BQ, backing_queue_state = BQS, stats_timer = StatsTimer}) -> - BQS1 = rabbit_amqqueue_process_utils:backing_queue_pre_hibernate(BQ, BQS), + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + BQS3 = BQ:handle_pre_hibernate(BQS2), rabbit_event:if_enabled(StatsTimer, fun () -> emit_stats(State, [{idle_since, now()}]) end), State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), - backing_queue_state = BQS1}, + backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. 
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index e3cfe54d..cceb67e2 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -235,8 +235,12 @@ code_change(_OldVsn, State, _Extra) -> handle_pre_hibernate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQS1 = rabbit_amqqueue_process_utils:backing_queue_pre_hibernate(BQ, BQS), - {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS1 })}. + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + BQS3 = BQ:handle_pre_hibernate(BQS2), + {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}. prioritise_call(Msg, _From, _State) -> case Msg of diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 2fb3be51..25ee1fd0 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -19,7 +19,7 @@ -rabbit_boot_step({mirror_queue_slave_sup, [{description, "mirror queue slave sup"}, {mfa, {rabbit_mirror_queue_slave_sup, start, []}}, - {requires, queue_sup_queue_recovery}, + {requires, recovery}, {enables, routing_ready}]}). -rabbit_boot_step({mirrored_queues, -- cgit v1.2.1 From edf825027ac818f30ddfebf0c369661b6fa8a16b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 13 Apr 2011 17:50:59 +0100 Subject: Slight further simplification --- src/rabbit_binding.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 2c9c316b..e933e453 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -319,13 +319,11 @@ continue('$end_of_table') -> false; continue({[_|_], _}) -> true; continue({[], Continuation}) -> continue(mnesia:select(Continuation)). -remove_for_destination(DstName, FwdDeleteFun) -> +remove_for_destination(DstName, DeleteFun) -> Bindings = [begin Route = reverse_route(ReverseRoute), - ok = FwdDeleteFun(Route), - ok = mnesia:delete_object(rabbit_reverse_route, - ReverseRoute, write), + ok = DeleteFun(Route), Route#route.binding end || ReverseRoute <- mnesia:match_object( -- cgit v1.2.1 From b6dbd675e68a0c5fc75c9d916858fc2500e8316d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 13 Apr 2011 19:17:37 +0100 Subject: cosmetic(ish) --- src/rabbit_binding.erl | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index e933e453..7f269de3 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -149,10 +149,11 @@ add(Binding, InnerFun) -> end). add(Src, Dst, B) -> - Durable = durable(Src) andalso durable(Dst), - case (not Durable orelse mnesia:read({rabbit_durable_route, B}) =:= []) of - true -> ok = sync_route(#route{binding = B}, durable(Src), - durable(Dst), fun mnesia:write/3), + [SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]], + case (not (SrcDurable andalso DstDurable) orelse + mnesia:read({rabbit_durable_route, B}) =:= []) of + true -> ok = sync_route(#route{binding = B}, SrcDurable, DstDurable, + fun mnesia:write/3), fun (Tx) -> ok = rabbit_exchange:callback(Src, add_binding, [Tx, Src, B]), rabbit_event:notify_if(not Tx, binding_created, @@ -277,7 +278,6 @@ binding_action(Binding = #binding{source = SrcName, sync_route(R, Fun) -> sync_route(R, true, true, Fun). 
-%% (Route, SrcDurable, DstDurable, Fun) sync_route(Route, true, true, Fun) -> ok = Fun(rabbit_durable_route, Route, write), sync_route(Route, false, true, Fun); @@ -320,19 +320,14 @@ continue({[_|_], _}) -> true; continue({[], Continuation}) -> continue(mnesia:select(Continuation)). remove_for_destination(DstName, DeleteFun) -> - Bindings = - [begin - Route = reverse_route(ReverseRoute), - ok = DeleteFun(Route), - Route#route.binding - end || ReverseRoute - <- mnesia:match_object( - rabbit_reverse_route, - reverse_route(#route{ - binding = #binding{ - destination = DstName, - _ = '_'}}), - write)], + Match = reverse_route( + #route{binding = #binding{destination = DstName, _ = '_'}}), + ReverseRoutes = mnesia:match_object(rabbit_reverse_route, Match, write), + Bindings = [begin + Route = reverse_route(ReverseRoute), + ok = DeleteFun(Route), + Route#route.binding + end || ReverseRoute <- ReverseRoutes], group_bindings_fold(fun maybe_auto_delete/3, new_deletions(), lists:keysort(#binding.source, Bindings)). -- cgit v1.2.1 From 009864788c13950c35d313f532b560bf7c7fe5fb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 14 Apr 2011 14:37:01 +0100 Subject: (new head of existing branch bug23831). The direct client links to the rabbit_channel. This is the first time that we have a process linking to the channel which is not the parent of the channel. Consequently, and because the channel has trap_exit turned on, the channel needs to be able to deal with EXIT messages that arrive for it which are not from its parent --- src/rabbit_channel.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c12614c..083c5963 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -303,7 +303,10 @@ handle_info({'DOWN', MRef, process, QPid, Reason}, handle_publishing_queue_down(QPid, Reason, State); {ok, ConsumerTag} -> handle_consuming_queue_down(MRef, ConsumerTag, State) - end). + end); + +handle_info({'EXIT', _Pid, Reason}, State) -> + {stop, Reason, State}. handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> ok = clear_permission_cache(), -- cgit v1.2.1 From 89244fcc94447c3d209982f332fda91f98610ef9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 14 Apr 2011 17:11:37 +0100 Subject: Simplify tap_trace_in/out, don't make msg_to_table return a tuple, refactor, cosmetics. --- src/rabbit_trace.erl | 74 +++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 41 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index f03903c5..96e85024 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -32,55 +32,41 @@ %%---------------------------------------------------------------------------- -tap_trace_in(Message = #basic_message{ - exchange_name = #resource{virtual_host = VHost, - name = XName}}) -> - check_trace( - XName, VHost, - fun (TraceExchange) -> - {EncodedMetadata, Payload} = message_to_table(Message), - publish(TraceExchange, VHost, <<"publish">>, XName, - EncodedMetadata, Payload) - end). 
- -tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, - Message = #basic_message{ - exchange_name = #resource{virtual_host = VHost, - name = XName}}}) -> - check_trace( - XName, VHost, - fun (TraceExchange) -> - RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - {EncodedMetadata, Payload} = message_to_table(Message), - Fields = [{<<"redelivered">>, signedint, RedeliveredNum}] - ++ EncodedMetadata, - publish(TraceExchange, VHost, <<"deliver">>, QName, - Fields, Payload) - end). - -check_trace(XName, VHost, F) -> - case application:get_env(rabbit, {trace_exchange, VHost}) of +tap_trace_in(Msg) -> + maybe_trace(Msg, <<"publish">>, xname(Msg), []). + +tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}) -> + RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + maybe_trace(Msg, <<"deliver">>, QName, + [{<<"redelivered">>, signedint, RedeliveredNum}]). + +xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. +vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. + +maybe_trace(Msg, RKPrefix, RKSuffix, Extra) -> + XName = xname(Msg), + case application:get_env(rabbit, {trace_exchange, vhost(Msg)}) of undefined -> ok; {ok, XName} -> ok; - {ok, TraceX} -> case catch F(TraceX) of + {ok, TraceX} -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, + Extra) of {'EXIT', R} -> rabbit_log:info( "Trace tap died: ~p~n", [R]); ok -> ok end end. -publish(TraceExchange, VHost, RKPrefix, RKSuffix, Table, Payload) -> - rabbit_basic:publish(rabbit_misc:r(VHost, exchange, TraceExchange), +trace(TraceX, Msg0, RKPrefix, RKSuffix, Extra) -> + Msg = ensure_content_decoded(Msg0), + rabbit_basic:publish(rabbit_misc:r(vhost(Msg), exchange, TraceX), <>, - #'P_basic'{headers = Table}, Payload), + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, + payload(Msg)), ok. -message_to_table(#basic_message{exchange_name = #resource{name = XName}, - routing_keys = RoutingKeys, - content = Content}) -> - #content{properties = Props, - payload_fragments_rev = PFR} = - rabbit_binary_parser:ensure_content_decoded(Content), +msg_to_table(#basic_message{exchange_name = #resource{name = XName}, + routing_keys = RoutingKeys, + content = #content{properties = Props}}) -> {PropsTable, _Ix} = lists:foldl( fun (K, {L, Ix}) -> @@ -91,11 +77,17 @@ message_to_table(#basic_message{exchange_name = #resource{name = XName}, end, {NewL, Ix + 1} end, {[], 2}, record_info(fields, 'P_basic')), - {[{<<"exchange_name">>, longstr, XName}, + [{<<"exchange_name">>, longstr, XName}, {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, {<<"properties">>, table, PropsTable}, - {<<"node">>, longstr, a2b(node())}], - list_to_binary(lists:reverse(PFR))}. + {<<"node">>, longstr, a2b(node())}]. + +payload(#basic_message{content = #content{payload_fragments_rev = PFR}}) -> + list_to_binary(lists:reverse(PFR)). + +ensure_content_decoded(Msg = #basic_message{content = Content}) -> + Msg#basic_message{content = rabbit_binary_parser:ensure_content_decoded( + Content)}. a2b(A) -> list_to_binary(atom_to_list(A)). -- cgit v1.2.1 From 9dc2bed83820ef17e9257177fe3d06a2ec7c038c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 14 Apr 2011 18:14:23 +0100 Subject: Allow the erlang client to remain ignorant of rabbit_event. 
--- src/rabbit_direct.erl | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 0810c762..df422c15 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/4, start_channel/8]). +-export([boot/0, connect/5, start_channel/8, disconnect/1]). -include("rabbit.hrl"). @@ -25,7 +25,8 @@ -ifdef(use_specs). -spec(boot/0 :: () -> 'ok'). --spec(connect/4 :: (binary(), binary(), binary(), rabbit_types:protocol()) -> +-spec(connect/5 :: (binary(), binary(), binary(), rabbit_types:protocol(), + term()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). -spec(start_channel/8 :: @@ -33,6 +34,8 @@ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}). +-spec(disconnect/1 :: (pid()) -> 'ok'). + -endif. %%---------------------------------------------------------------------------- @@ -50,13 +53,14 @@ boot() -> %%---------------------------------------------------------------------------- -connect(Username, Password, VHost, Protocol) -> +connect(Username, Password, VHost, Protocol, Infos) -> case lists:keymember(rabbit, 1, application:which_applications()) of true -> try rabbit_access_control:user_pass_login(Username, Password) of #user{} = User -> try rabbit_access_control:check_vhost_access(User, VHost) of - ok -> {ok, {User, + ok -> rabbit_event:notify(connection_created, Infos), + {ok, {User, rabbit_reader:server_properties(Protocol)}} catch exit:#amqp_error{name = access_refused} -> @@ -77,3 +81,6 @@ start_channel(Number, ClientChannelPid, ConnPid, Protocol, User, VHost, [{direct, Number, ClientChannelPid, ConnPid, Protocol, User, VHost, Capabilities, Collector}]), {ok, ChannelPid}. + +disconnect(Pid) -> + rabbit_event:notify(connection_closed, [{pid, Pid}]). -- cgit v1.2.1 From 5e29f02be887b4f15fee8802b9e16644204169ec Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 15 Apr 2011 13:16:03 +0100 Subject: tiny refactor --- src/rabbit_binding.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 7f269de3..dc119fbd 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -290,9 +290,8 @@ sync_route(Route, _SrcDurable, false, Fun) -> sync_transient_route(Route, Fun). sync_transient_route(Route, Fun) -> - ReverseRoute = reverse_route(Route), ok = Fun(rabbit_route, Route, write), - ok = Fun(rabbit_reverse_route, ReverseRoute, write). + ok = Fun(rabbit_reverse_route, reverse_route(Route), write). call_with_source_and_destination(SrcName, DstName, Fun) -> SrcTable = table_for_resource(SrcName), -- cgit v1.2.1 From effab8c369e7427883687f367871befb86422b71 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 15 Apr 2011 13:33:36 +0100 Subject: Symmetry, specs. --- src/rabbit_direct.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index df422c15..0dac18d1 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -26,7 +26,7 @@ -spec(boot/0 :: () -> 'ok'). -spec(connect/5 :: (binary(), binary(), binary(), rabbit_types:protocol(), - term()) -> + rabbit_event:event_props()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). -spec(start_channel/8 :: @@ -34,7 +34,7 @@ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}). --spec(disconnect/1 :: (pid()) -> 'ok'). 
+-spec(disconnect/1 :: (rabbit_event:event_props()) -> 'ok'). -endif. @@ -82,5 +82,5 @@ start_channel(Number, ClientChannelPid, ConnPid, Protocol, User, VHost, Capabilities, Collector}]), {ok, ChannelPid}. -disconnect(Pid) -> - rabbit_event:notify(connection_closed, [{pid, Pid}]). +disconnect(Infos) -> + rabbit_event:notify(connection_closed, Infos). -- cgit v1.2.1 From 2f874e49bc2e809fdb8e6ba4905113f2ba9419c6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 24 Apr 2011 11:11:05 +0100 Subject: Rotate logs uses ctl, not server. Also die if ctl doesn't exist or isn't executable --- packaging/common/rabbitmq-server.init | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/common/rabbitmq-server.init b/packaging/common/rabbitmq-server.init index f3bdc3d2..d8a7a94d 100644 --- a/packaging/common/rabbitmq-server.init +++ b/packaging/common/rabbitmq-server.init @@ -28,6 +28,7 @@ INIT_LOG_DIR=/var/log/rabbitmq LOCK_FILE= # This is filled in when building packages test -x $DAEMON || exit 0 +test -x $CONTROL || exit 0 RETVAL=0 set -e @@ -94,7 +95,7 @@ status_rabbitmq() { rotate_logs_rabbitmq() { set +e - $DAEMON rotate_logs ${ROTATE_SUFFIX} + $CONTROL rotate_logs ${ROTATE_SUFFIX} if [ $? != 0 ] ; then RETVAL=1 fi -- cgit v1.2.1 From 4f5bd52ef39690f96dec9f8b71447366248cdd67 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 26 Apr 2011 17:10:35 +0100 Subject: optimise common case that buf contains a single binary This turns out to improve performance *a lot*. --- src/rabbit_reader.erl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 4dcb7446..5ecb2e73 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -246,8 +246,11 @@ recvloop(Deb, State = #v1{sock = Sock, recv_length = Length, buf = Buf}) -> case iolist_size(Buf) < Length of true -> ok = rabbit_net:setopts(Sock, [{active, once}]), mainloop(Deb, State#v1{pending_recv = true}); - false -> {Data, Rest} = split_binary( - list_to_binary(lists:reverse(Buf)), Length), + false -> {Data, Rest} = split_binary(case Buf of + [B] -> B; + _ -> list_to_binary( + lists:reverse(Buf)) + end, Length), recvloop(Deb, handle_input(State#v1.callback, Data, State#v1{buf = [Rest]})) end. -- cgit v1.2.1 From d7f0084e0b35d4cfeffa35efa74169df51d2182e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 28 Apr 2011 15:26:17 +0100 Subject: Precisely provide the same api as file:write_file/2,3 --- src/rabbit_misc.erl | 48 +++++++++++++++++++++++++++++++----------------- src/rabbit_prelaunch.erl | 3 +-- src/rabbit_tests.erl | 2 +- 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index d82ef7f3..3451724e 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -40,7 +40,7 @@ -export([upmap/2, map_in_order/2]). -export([table_filter/3]). -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). --export([read_term_file/1, write_term_file/2, write_file/3]). +-export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]). -export([append_file/2, ensure_parent_dirs_exist/1]). -export([format_stderr/2]). -export([start_applications/1, stop_applications/1]). @@ -516,32 +516,46 @@ dirty_dump_log1(LH, {K, Terms, BadBytes}) -> read_term_file(File) -> file:consult(File). write_term_file(File, Terms) -> - write_file(File, false, list_to_binary([io_lib:format("~w.~n", [Term]) || - Term <- Terms])). 
- -write_file(Path, Append, Binary) when is_binary(Binary) -> - Modes = [binary, write, raw | case Append of - true -> [read]; - false -> [] - end], - case file:open(Path, Modes) of - {ok, Hdl} -> - case file:position(Hdl, eof) of - {ok, _Pos} -> - case file:write(Hdl, Binary) of + write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) || + Term <- Terms])). + +write_file(Path, Data) -> + write_file(Path, Data, []). + +write_file(Path, Data, Modes) -> + Modes1 = [binary, write | (Modes -- [binary, write])], + case make_binary(Data) of + Bin when is_binary(Bin) -> + case file:open(Path, Modes1) of + {ok, Hdl} -> + case file:write(Hdl, Bin) of ok -> case file:sync(Hdl) of ok -> file:close(Hdl); - {error, _} = E -> E + {error, _} = E -> + file:close(Hdl), + E end; - {error, _} = E -> E + {error, _} = E -> + file:close(Hdl), + E end; {error, _} = E -> E end; {error, _} = E -> E end. +make_binary(Bin) when is_binary(Bin) -> + Bin; +make_binary(List) -> + try + iolist_to_binary(List) + catch error:Reason -> + {error, Reason} + end. + + append_file(File, Suffix) -> case file:read_file_info(File) of {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix); @@ -558,7 +572,7 @@ append_file(File, 0, Suffix) -> end; append_file(File, _, Suffix) -> case file:read_file(File) of - {ok, Data} -> write_file(File ++ Suffix, true, Data); + {ok, Data} -> write_file([File, Suffix], Data, [append]); Error -> Error end. diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index f7218fbd..2512a602 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -67,8 +67,7 @@ start() -> AppVersions}, %% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel - rabbit_misc:write_file(RootName ++ ".rel", false, - list_to_binary(io_lib:format("~p.~n", [RDesc]))), + rabbit_misc:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), %% Compile the script ScriptFile = RootName ++ ".script", diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 93a5f732..45dd39a2 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1607,7 +1607,7 @@ test_file_handle_cache() -> [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]], Content = <<"foo">>, CopyFun = fun (Src, Dst) -> - ok = rabbit_misc:write_file(Src, false, Content), + ok = rabbit_misc:write_file(Src, Content), {ok, SrcHdl} = file_handle_cache:open(Src, [read], []), {ok, DstHdl} = file_handle_cache:open(Dst, [write], []), Size = size(Content), -- cgit v1.2.1 From 91a8dc7db00173ee792db631ab6a181566f8c8d5 Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Fri, 29 Apr 2011 05:43:20 +0000 Subject: Bump sleep times during tests to 100ms. This fixes timing issues I was seeing on OpenBSD. Solution pointed out by Matthew Sackman. --- src/rabbit_tests.erl | 2 +- src/test_sup.erl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 524e8e6e..6cb0dbf4 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1553,7 +1553,7 @@ test_logs_working(MainLogFile, SaslLogFile) -> ok = rabbit_log:error("foo bar"), ok = error_logger:error_report(crash_report, [foo, bar]), %% give the error loggers some time to catch up - timer:sleep(50), + timer:sleep(100), [true, true] = non_empty_files([MainLogFile, SaslLogFile]), ok. 
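Because rabbit_misc:write_file/2,3 now takes the same arguments as file:write_file/2,3 (but fsyncs before closing), call sites only change the module name. A hypothetical usage sketch, assuming the patched rabbit_misc is on the code path and the file names are made up:

    write_once(Path, Data) ->
        ok = rabbit_misc:write_file(Path, Data).

    append_backup(File, Data) ->
        ok = rabbit_misc:write_file([File, ".bak"], Data, [append]).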
diff --git a/src/test_sup.erl b/src/test_sup.erl index 150235da..84c4121c 100644 --- a/src/test_sup.erl +++ b/src/test_sup.erl @@ -33,10 +33,10 @@ test_supervisor_delayed_restart() -> test_supervisor_delayed_restart(SupPid) -> ok = ping_child(SupPid), ok = exit_child(SupPid), - timer:sleep(10), + timer:sleep(100), ok = ping_child(SupPid), ok = exit_child(SupPid), - timer:sleep(10), + timer:sleep(100), timeout = ping_child(SupPid), timer:sleep(1010), ok = ping_child(SupPid), -- cgit v1.2.1 From b50b5fe89b3a96a8d22c24586cc9ad1eed89425e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 3 May 2011 15:33:33 +0100 Subject: Separate out different types for network and direct connections, don't check the password for direct ones. --- src/rabbit_direct.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 0dac18d1..7ad63e81 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -16,7 +16,7 @@ -module(rabbit_direct). --export([boot/0, connect/5, start_channel/8, disconnect/1]). +-export([boot/0, connect/4, start_channel/8, disconnect/1]). -include("rabbit.hrl"). @@ -25,7 +25,7 @@ -ifdef(use_specs). -spec(boot/0 :: () -> 'ok'). --spec(connect/5 :: (binary(), binary(), binary(), rabbit_types:protocol(), +-spec(connect/4 :: (binary(), binary(), rabbit_types:protocol(), rabbit_event:event_props()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). @@ -53,11 +53,11 @@ boot() -> %%---------------------------------------------------------------------------- -connect(Username, Password, VHost, Protocol, Infos) -> +connect(Username, VHost, Protocol, Infos) -> case lists:keymember(rabbit, 1, application:which_applications()) of true -> - try rabbit_access_control:user_pass_login(Username, Password) of - #user{} = User -> + case rabbit_access_control:check_user_login(Username, []) of + {ok, User} -> try rabbit_access_control:check_vhost_access(User, VHost) of ok -> rabbit_event:notify(connection_created, Infos), {ok, {User, @@ -65,9 +65,9 @@ connect(Username, Password, VHost, Protocol, Infos) -> catch exit:#amqp_error{name = access_refused} -> {error, access_refused} - end - catch - exit:#amqp_error{name = access_refused} -> {error, auth_failure} + end; + {refused, _Msg, _Args} -> + {error, auth_failure} end; false -> {error, broker_not_found_on_node} -- cgit v1.2.1 From 7f709cc3ef1440a29506c0fafd9a650fda9fab36 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 3 May 2011 17:17:52 +0100 Subject: Remove dead code. --- src/rabbit_access_control.erl | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl index b0b57af4..59c00848 100644 --- a/src/rabbit_access_control.erl +++ b/src/rabbit_access_control.erl @@ -18,7 +18,7 @@ -include("rabbit.hrl"). --export([user_pass_login/2, check_user_pass_login/2, check_user_login/2, +-export([check_user_pass_login/2, check_user_login/2, check_vhost_access/2, check_resource_access/3, list_vhosts/2]). %%---------------------------------------------------------------------------- @@ -30,9 +30,6 @@ -type(permission_atom() :: 'configure' | 'read' | 'write'). -type(vhost_permission_atom() :: 'read' | 'write'). --spec(user_pass_login/2 :: - (rabbit_types:username(), rabbit_types:password()) - -> rabbit_types:user() | rabbit_types:channel_exit()). 
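With connect/4 above, a direct (in-broker) client is authenticated on user name alone and the supplied Infos are emitted as the connection_created event. A hypothetical caller sketch showing the return shapes:

    open_direct(Username, VHost, Protocol, Infos) ->
        case rabbit_direct:connect(Username, VHost, Protocol, Infos) of
            {ok, {User, ServerProperties}} -> {ok, User, ServerProperties};
            {error, auth_failure}          -> {error, auth_failure};
            {error, Other}                 -> {error, Other}
        end.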
-spec(check_user_pass_login/2 :: (rabbit_types:username(), rabbit_types:password()) -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}). @@ -49,16 +46,6 @@ %%---------------------------------------------------------------------------- -user_pass_login(User, Pass) -> - ?LOGDEBUG("Login with user ~p pass ~p~n", [User, Pass]), - case check_user_pass_login(User, Pass) of - {refused, Msg, Args} -> - rabbit_misc:protocol_error( - access_refused, "login refused: ~s", [io_lib:format(Msg, Args)]); - {ok, U} -> - U - end. - check_user_pass_login(Username, Password) -> check_user_login(Username, [{password, Password}]). -- cgit v1.2.1 From ab8c1770d1a0e687e2e363b3d73cd9482640fd01 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 3 May 2011 17:26:52 +0100 Subject: I cannot BELIEVE that anyone was so UNPROFFESSIONNAL as to OMIT these TYPES. --- src/rabbit_direct.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl index 7ad63e81..7ff534ee 100644 --- a/src/rabbit_direct.erl +++ b/src/rabbit_direct.erl @@ -25,8 +25,8 @@ -ifdef(use_specs). -spec(boot/0 :: () -> 'ok'). --spec(connect/4 :: (binary(), binary(), rabbit_types:protocol(), - rabbit_event:event_props()) -> +-spec(connect/4 :: (rabbit_types:username(), rabbit_types:vhost(), + rabbit_types:protocol(), rabbit_event:event_props()) -> {'ok', {rabbit_types:user(), rabbit_framing:amqp_table()}}). -spec(start_channel/8 :: -- cgit v1.2.1 From 18fe42a37a41ea8bc1878716077e9fe4e7c6cdea Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 5 May 2011 11:13:25 +0100 Subject: Remove pointless diffs from default. --- src/rabbit_exchange.erl | 6 +++--- src/rabbit_upgrade_functions.erl | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 28a639ac..84a44cd2 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -36,7 +36,7 @@ -type(type() :: atom()). -type(fun_name() :: atom()). --spec(recover/0 :: () -> [rabbit_types:resource()]). +-spec(recover/0 :: () -> [name()]). -spec(callback/3:: (rabbit_types:exchange(), fun_name(), [any()]) -> 'ok'). -spec(declare/6 :: (name(), type(), boolean(), boolean(), boolean(), @@ -98,8 +98,8 @@ recover() -> rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. -callback(#exchange{type = Type}, Fun, Args) -> - apply(type_to_module(Type), Fun, Args). +callback(#exchange{type = XType}, Fun, Args) -> + apply(type_to_module(XType), Fun, Args). declare(XName, Type, Durable, AutoDelete, Internal, Args) -> X = #exchange{name = XName, diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 1bd9a7c3..31bbb929 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -108,7 +108,7 @@ topic_trie() -> semi_durable_route() -> create(rabbit_semi_durable_route, [{record_name, route}, {attributes, [binding, value]}]). - + exchange_event_serial() -> create(rabbit_exchange_serial, [{record_name, exchange_serial}, {attributes, [name, next]}]). -- cgit v1.2.1 From 9cd3070bd66344ddc1dc31b5fcf4ea2461cb0805 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 5 May 2011 11:18:19 +0100 Subject: 80 columns. 
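With user_pass_login/2 removed above, callers of check_user_pass_login/2 handle the {refused, Fmt, Args} result themselves rather than relying on a protocol error being raised. A hypothetical sketch of such a caller:

    login(Username, Password) ->
        case rabbit_access_control:check_user_pass_login(Username, Password) of
            {ok, User}           -> {ok, User};
            {refused, Fmt, Args} -> {error, {auth_failure,
                                             lists:flatten(
                                               io_lib:format(Fmt, Args))}}
        end.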
--- src/rabbit_binding.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 1944792e..2f71bfab 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -156,10 +156,12 @@ add(Src, Dst, B) -> mnesia:read({rabbit_durable_route, B}) =:= []) of true -> ok = sync_route(#route{binding = B}, SrcDurable, DstDurable, fun mnesia:write/3), - ok = rabbit_exchange:callback(Src, add_binding, [transaction, Src, B]), + ok = rabbit_exchange:callback( + Src, add_binding, [transaction, Src, B]), Serial = rabbit_exchange:serial(Src), fun () -> - ok = rabbit_exchange:callback(Src, add_binding, [Serial, Src, B]), + ok = rabbit_exchange:callback( + Src, add_binding, [Serial, Src, B]), ok = rabbit_event:notify(binding_created, info(B)) end; false -> rabbit_misc:const({error, binding_not_found}) -- cgit v1.2.1 From 1a6d3f69ef02454d5723cdcc06a5a788d821201f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 5 May 2011 14:57:47 +0100 Subject: trace_exchange -> trace_exchanges --- src/rabbit_trace.erl | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 96e85024..a2cb5027 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -45,15 +45,19 @@ vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. maybe_trace(Msg, RKPrefix, RKSuffix, Extra) -> XName = xname(Msg), - case application:get_env(rabbit, {trace_exchange, vhost(Msg)}) of - undefined -> ok; - {ok, XName} -> ok; - {ok, TraceX} -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, - Extra) of - {'EXIT', R} -> rabbit_log:info( - "Trace tap died: ~p~n", [R]); - ok -> ok - end + case trace_exchange(vhost(Msg)) of + none -> ok; + XName -> ok; + TraceX -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, Extra) of + {'EXIT', R} -> rabbit_log:info("Trace died: ~p~n", [R]); + ok -> ok + end + end. + +trace_exchange(VHost) -> + case application:get_env(rabbit, trace_exchanges) of + undefined -> none; + {ok, Xs} -> proplists:get_value(VHost, Xs, none) end. trace(TraceX, Msg0, RKPrefix, RKSuffix, Extra) -> -- cgit v1.2.1 From 8dff232b17a8a50a617df3b4d4ccca3a890365c3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 9 May 2011 15:17:38 +0100 Subject: How did these tabs get here? --- src/rabbit_net.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl index c500548a..cbdadd16 100644 --- a/src/rabbit_net.erl +++ b/src/rabbit_net.erl @@ -28,8 +28,8 @@ -export_type([socket/0]). -type(stat_option() :: - 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' | - 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'). + 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' | + 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'). -type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())). -type(ok_or_any_error() :: rabbit_types:ok_or_error(any())). -type(socket() :: port() | #ssl_socket{}). 
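The renamed trace_exchanges setting above is a per-vhost proplist rather than a single exchange name, so enabling tracing means updating the entry for one vhost. A minimal sketch (helper name is illustrative; the exchange name is a binary, as elsewhere in the patch):

    set_trace_exchange(VHost, TraceXNameBin) ->
        Xs = case application:get_env(rabbit, trace_exchanges) of
                 undefined -> [];
                 {ok, L}   -> L
             end,
        application:set_env(rabbit, trace_exchanges,
                            lists:keystore(VHost, 1, Xs,
                                           {VHost, TraceXNameBin})).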
-- cgit v1.2.1 From 493b759a1de2fcccc9c15712a711f51aeba709c5 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 10 May 2011 05:14:43 +0100 Subject: refactor: extract commonality between tcp and ssl recv ...and add a guard, for consistency with other funs --- src/rabbit_net.erl | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl index b6cc28af..53aba46a 100644 --- a/src/rabbit_net.erl +++ b/src/rabbit_net.erl @@ -87,20 +87,16 @@ getstat(Sock, Stats) when is_port(Sock) -> inet:getstat(Sock, Stats). recv(Sock) when ?IS_SSL(Sock) -> - S = Sock#ssl_socket.ssl, - receive - {ssl, S, Data} -> {data, Data}; - {ssl_closed, S} -> closed; - {ssl_error, S, Reason} -> {error, Reason}; - Other -> {other, Other} - end; -recv(Sock) -> - S = Sock, + recv(Sock#ssl_socket.ssl, {ssl, ssl_closed, ssl_error}); +recv(Sock) when is_port(Sock) -> + recv(Sock, {tcp, tcp_closed, tcp_error}). + +recv(S, {DataTag, ClosedTag, ErrorTag}) -> receive - {tcp, S, Data} -> {data, Data}; - {tcp_closed, S} -> closed; - {tcp_error, S, Reason} -> {error, Reason}; - Other -> {other, Other} + {DataTag, S, Data} -> {data, Data}; + {ClosedTag, S} -> closed; + {ErrorTag, S, Reason} -> {error, Reason}; + Other -> {other, Other} end. async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) -> -- cgit v1.2.1 From 8dbe0cd6721794063e1f043afdaabb0beb1afcb3 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 10 May 2011 06:00:04 +0100 Subject: track buffer size explicitly, resulting in a small performance improvement Also rename recv_length to recv_len for consistency and brevity. --- src/rabbit_reader.erl | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 5ecb2e73..9df67352 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -38,9 +38,9 @@ %%-------------------------------------------------------------------------- --record(v1, {parent, sock, connection, callback, recv_length, pending_recv, +-record(v1, {parent, sock, connection, callback, recv_len, pending_recv, connection_state, queue_collector, heartbeater, stats_timer, - channel_sup_sup_pid, start_heartbeat_fun, buf, + channel_sup_sup_pid, start_heartbeat_fun, buf, buf_len, auth_mechanism, auth_state}). 
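The buf_len field added to the reader state above keeps the buffered byte count alongside the buffer itself, so no iolist_size/1 call is needed per received packet. A minimal standalone sketch of keeping the two in step (helper names are illustrative):

    buf_append(Data, {Buf, BufLen}) when is_binary(Data) ->
        {[Data | Buf], BufLen + byte_size(Data)}.

    buf_has(RecvLen, {_Buf, BufLen}) ->
        BufLen >= RecvLen.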
-define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt, @@ -204,7 +204,7 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, client_properties = none, capabilities = []}, callback = uninitialized_callback, - recv_length = 0, + recv_len = 0, pending_recv = false, connection_state = pre_init, queue_collector = Collector, @@ -214,6 +214,7 @@ start_connection(Parent, ChannelSupSupPid, Collector, StartHeartbeatFun, Deb, channel_sup_sup_pid = ChannelSupSupPid, start_heartbeat_fun = StartHeartbeatFun, buf = [], + buf_len = 0, auth_mechanism = none, auth_state = none }, @@ -242,22 +243,23 @@ recvloop(Deb, State = #v1{pending_recv = true}) -> mainloop(Deb, State); recvloop(Deb, State = #v1{connection_state = blocked}) -> mainloop(Deb, State); -recvloop(Deb, State = #v1{sock = Sock, recv_length = Length, buf = Buf}) -> - case iolist_size(Buf) < Length of - true -> ok = rabbit_net:setopts(Sock, [{active, once}]), - mainloop(Deb, State#v1{pending_recv = true}); - false -> {Data, Rest} = split_binary(case Buf of - [B] -> B; - _ -> list_to_binary( - lists:reverse(Buf)) - end, Length), - recvloop(Deb, handle_input(State#v1.callback, Data, - State#v1{buf = [Rest]})) - end. - -mainloop(Deb, State = #v1{sock = Sock}) -> +recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen}) + when BufLen < RecvLen -> + ok = rabbit_net:setopts(Sock, [{active, once}]), + mainloop(Deb, State#v1{pending_recv = true}); +recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) -> + {Data, Rest} = split_binary(case Buf of + [B] -> B; + _ -> list_to_binary(lists:reverse(Buf)) + end, RecvLen), + recvloop(Deb, handle_input(State#v1.callback, Data, + State#v1{buf = [Rest], + buf_len = BufLen - RecvLen})). + +mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) -> case rabbit_net:recv(Sock) of - {data, Data} -> recvloop(Deb, State#v1{buf = [Data | State#v1.buf], + {data, Data} -> recvloop(Deb, State#v1{buf = [Data | Buf], + buf_len = BufLen + size(Data), pending_recv = false}); closed -> if State#v1.connection_state =:= closed -> State; @@ -332,9 +334,9 @@ handle_other(Other, _Deb, _State) -> switch_callback(State = #v1{connection_state = blocked, heartbeater = Heartbeater}, Callback, Length) -> ok = rabbit_heartbeat:pause_monitor(Heartbeater), - State#v1{callback = Callback, recv_length = Length}; + State#v1{callback = Callback, recv_len = Length}; switch_callback(State, Callback, Length) -> - State#v1{callback = Callback, recv_length = Length}. + State#v1{callback = Callback, recv_len = Length}. terminate(Explanation, State) when ?IS_RUNNING(State) -> {normal, send_exception(State, 0, -- cgit v1.2.1 From 8d6b67929bf5dcfbfad62c3e1fd232056a99c3f5 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 10 May 2011 16:05:43 +0100 Subject: Indentation --- src/rabbit_trace.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index a2cb5027..2d15e7fc 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -82,9 +82,9 @@ msg_to_table(#basic_message{exchange_name = #resource{name = XName}, {NewL, Ix + 1} end, {[], 2}, record_info(fields, 'P_basic')), [{<<"exchange_name">>, longstr, XName}, - {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, - {<<"properties">>, table, PropsTable}, - {<<"node">>, longstr, a2b(node())}]. 
+ {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, + {<<"properties">>, table, PropsTable}, + {<<"node">>, longstr, a2b(node())}]. payload(#basic_message{content = #content{payload_fragments_rev = PFR}}) -> list_to_binary(lists:reverse(PFR)). -- cgit v1.2.1 From 61349821a03d50016b4c73bf14d138970c1547ba Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 10 May 2011 18:19:19 +0100 Subject: Fix racy bug in tests as reported by Simon. Bug created in bug24037 I suspect --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 6cb0dbf4..095e358a 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2074,7 +2074,7 @@ test_queue_index() -> variable_queue_init(Q, Recover) -> rabbit_variable_queue:init( - Q, Recover, fun nop/1, fun nop/1, fun nop/2, fun nop/1). + Q, Recover, fun nop/2, fun nop/2, fun nop/2, fun nop/1). variable_queue_publish(IsPersistent, Count, VQ) -> lists:foldl( -- cgit v1.2.1 From 15a75b7720bfe433511e7422546cc6fe4d747a0b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 11 May 2011 16:39:38 +0100 Subject: Use erl files rather than beam files to generate the app module list. --- Makefile | 4 ++-- generate_app | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index cdb86aad..c07c043e 100644 --- a/Makefile +++ b/Makefile @@ -93,8 +93,8 @@ $(DEPS_FILE): $(SOURCES) $(INCLUDES) rm -f $@ echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR) -$(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(BEAM_TARGETS) generate_app - escript generate_app $(EBIN_DIR) $@ < $< +$(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app + escript generate_app $(SOURCE_DIR) $@ < $< $(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE) erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $< diff --git a/generate_app b/generate_app index 576b485e..10e5bafd 100644 --- a/generate_app +++ b/generate_app @@ -1,9 +1,9 @@ #!/usr/bin/env escript %% -*- erlang -*- -main([BeamDir, TargetFile]) -> - Modules = [list_to_atom(filename:basename(F, ".beam")) || - F <- filelib:wildcard("*.beam", BeamDir)], +main([SrcDir, TargetFile]) -> + Modules = [list_to_atom(filename:basename(F, ".erl")) || + F <- filelib:wildcard("*.erl", SrcDir)], {ok, {application, Application, Properties}} = io:read(''), NewProperties = lists:keyreplace(modules, 1, Properties, {modules, Modules}), -- cgit v1.2.1 From 247b06a9f3da109be6d493ac36849a7a83d36c1b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 11 May 2011 16:55:57 +0100 Subject: If we see a list of modules there already, trust it. --- generate_app | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/generate_app b/generate_app index 10e5bafd..2246971d 100644 --- a/generate_app +++ b/generate_app @@ -5,8 +5,11 @@ main([SrcDir, TargetFile]) -> Modules = [list_to_atom(filename:basename(F, ".erl")) || F <- filelib:wildcard("*.erl", SrcDir)], {ok, {application, Application, Properties}} = io:read(''), - NewProperties = lists:keyreplace(modules, 1, Properties, - {modules, Modules}), + NewProperties = + case proplists:get_value(modules, Properties) of + [] -> lists:keyreplace(modules, 1, Properties, {modules, Modules}); + _ -> Properties + end, file:write_file( TargetFile, io_lib:format("~p.~n", [{application, Application, NewProperties}])). 
-- cgit v1.2.1 From e42b14f7a1378cada2ca6994472be83b5411446f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 12 May 2011 11:43:06 +0100 Subject: Support multiple source dirs --- Makefile | 2 +- generate_app | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c07c043e..3a40f606 100644 --- a/Makefile +++ b/Makefile @@ -94,7 +94,7 @@ $(DEPS_FILE): $(SOURCES) $(INCLUDES) echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR) $(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app - escript generate_app $(SOURCE_DIR) $@ < $< + escript generate_app $@ $(SOURCE_DIR) < $< $(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE) erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $< diff --git a/generate_app b/generate_app index 2246971d..d8813542 100644 --- a/generate_app +++ b/generate_app @@ -1,8 +1,9 @@ #!/usr/bin/env escript %% -*- erlang -*- -main([SrcDir, TargetFile]) -> +main([TargetFile | SrcDirs]) -> Modules = [list_to_atom(filename:basename(F, ".erl")) || + SrcDir <- SrcDirs, F <- filelib:wildcard("*.erl", SrcDir)], {ok, {application, Application, Properties}} = io:read(''), NewProperties = -- cgit v1.2.1 From 6407282e346496b39e672392cb7211d2d6f49ab0 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sun, 15 May 2011 17:17:05 +0100 Subject: shrink --- src/rabbit_misc.erl | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 3451724e..ff31921e 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -527,20 +527,12 @@ write_file(Path, Data, Modes) -> case make_binary(Data) of Bin when is_binary(Bin) -> case file:open(Path, Modes1) of - {ok, Hdl} -> - case file:write(Hdl, Bin) of - ok -> - case file:sync(Hdl) of - ok -> - file:close(Hdl); - {error, _} = E -> - file:close(Hdl), - E - end; - {error, _} = E -> - file:close(Hdl), - E - end; + {ok, Hdl} -> try file:write(Hdl, Bin) of + ok -> file:sync(Hdl); + {error, _} = E -> E + after + file:close(Hdl) + end; {error, _} = E -> E end; {error, _} = E -> E -- cgit v1.2.1 From 9080f592a2d9413138ee46a079a6ac761459a75c Mon Sep 17 00:00:00 2001 From: Rob Harrop Date: Mon, 16 May 2011 14:58:55 +0100 Subject: Fixed call to validate_message --- src/rabbit_mirror_queue_master.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 481ee7c4..f54c8c37 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -287,7 +287,7 @@ is_duplicate(none, Message = #basic_message { id = MsgId }, error -> %% We permit the underlying BQ to have a peek at it, but %% only if we ourselves are not filtering out the msg. - {Result, BQS1} = BQ:validate_message(Message, BQS), + {Result, BQS1} = BQ:is_duplicate(none, Message, BQS), {Result, State #state { backing_queue_state = BQS1 }}; {ok, published} -> %% It already got published when we were a slave and no -- cgit v1.2.1 From 5176aecef993077cf26c81077495cc9527d5ec2e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 16 May 2011 17:36:41 +0100 Subject: Correct specs --- src/rabbit_misc.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index ff31921e..c52e817b 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -154,7 +154,8 @@ -spec(read_term_file/1 :: (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())). 
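The shrink above leans on try ... after to close the handle on every exit path while still returning the write/sync result. The same pattern as a small reusable helper (hypothetical, not part of the patch):

    with_file(Path, Modes, Fun) ->
        case file:open(Path, Modes) of
            {ok, Hdl}      -> try Fun(Hdl) after file:close(Hdl) end;
            {error, _} = E -> E
        end.

For instance, with_file(Path, [binary, write], fun (Hdl) -> file:write(Hdl, Data) end) closes Hdl whether the write succeeds, fails or throws.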
-spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()). --spec(write_file/3 :: (file:filename(), boolean(), binary()) -> ok_or_error()). +-spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()). +-spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()). -spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). -- cgit v1.2.1 From 4ec37f3d3ce122a8e0d6a69fdd3843f83831416f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 16 May 2011 22:08:12 +0100 Subject: Added origination comment --- src/rabbit_misc.erl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index c52e817b..a37f5dcc 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -523,6 +523,10 @@ write_term_file(File, Terms) -> write_file(Path, Data) -> write_file(Path, Data, []). +%% write_file/3 is based on the implementation the kernel/file.erl +%% file of the Erlang R14B02 release, licensed under the EPL. That +%% implementation does not do an fsync prior to closing the file, +%% hence the existence of this version. write_file(Path, Data, Modes) -> Modes1 = [binary, write | (Modes -- [binary, write])], case make_binary(Data) of -- cgit v1.2.1 From 3b01df91db1e37fa44cc3e5e6414c4d75661867d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 16 May 2011 22:08:59 +0100 Subject: English --- src/rabbit_misc.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index a37f5dcc..9895a137 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -523,7 +523,7 @@ write_term_file(File, Terms) -> write_file(Path, Data) -> write_file(Path, Data, []). -%% write_file/3 is based on the implementation the kernel/file.erl +%% write_file/3 is based on the implementation in the kernel/file.erl %% file of the Erlang R14B02 release, licensed under the EPL. That %% implementation does not do an fsync prior to closing the file, %% hence the existence of this version. -- cgit v1.2.1 From 7981ac3166a4cee4f88a346a75ebd6d1c78ce95f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 16 May 2011 22:11:06 +0100 Subject: Extra words --- src/rabbit_misc.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 9895a137..9a675fbb 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -523,10 +523,10 @@ write_term_file(File, Terms) -> write_file(Path, Data) -> write_file(Path, Data, []). -%% write_file/3 is based on the implementation in the kernel/file.erl -%% file of the Erlang R14B02 release, licensed under the EPL. That -%% implementation does not do an fsync prior to closing the file, -%% hence the existence of this version. +%% write_file/3 and make_binary/1 is based on the implementation in +%% the kernel/file.erl file of the Erlang R14B02 release, licensed +%% under the EPL. That implementation does not do an fsync prior to +%% closing the file, hence the existence of this version. 
write_file(Path, Data, Modes) -> Modes1 = [binary, write | (Modes -- [binary, write])], case make_binary(Data) of -- cgit v1.2.1 From 4185fcfa5035d0c3301a4868c4f9c00766d2c04d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 16 May 2011 22:31:56 +0100 Subject: Closer approximation to English --- src/rabbit_misc.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 9a675fbb..2d433ac2 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -523,10 +523,11 @@ write_term_file(File, Terms) -> write_file(Path, Data) -> write_file(Path, Data, []). -%% write_file/3 and make_binary/1 is based on the implementation in -%% the kernel/file.erl file of the Erlang R14B02 release, licensed -%% under the EPL. That implementation does not do an fsync prior to -%% closing the file, hence the existence of this version. +%% write_file/3 and make_binary/1 are both based on corresponding +%% functions in the kernel/file.erl module of the Erlang R14B02 +%% release, which is licensed under the EPL. That implementation of +%% write_file/3 does not do an fsync prior to closing the file, hence +%% the existence of this version. APIs are otherwise identical. write_file(Path, Data, Modes) -> Modes1 = [binary, write | (Modes -- [binary, write])], case make_binary(Data) of -- cgit v1.2.1 From 04c6113a9f40b08868b4c32cd1467002d849722c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 11:14:36 +0100 Subject: Not especially happy with this but can't think of how else to solve this. If the queue has mirrors then you may well find that during death you try to contact a dead queue process, because the mnesia table is yet to be updated. In such cases, loop. --- src/rabbit_amqqueue.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index d79fe9df..534d1002 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -256,8 +256,13 @@ lookup(Name) -> with(Name, F, E) -> case lookup(Name) of - {ok, Q} -> rabbit_misc:with_exit_handler(E, fun () -> F(Q) end); - {error, not_found} -> E() + {ok, Q = #amqqueue{mirror_pids = []}} -> + rabbit_misc:with_exit_handler(E, fun () -> F(Q) end); + {ok, Q} -> + E1 = fun () -> with(Name, F, E) end, + rabbit_misc:with_exit_handler(E1, fun () -> F(Q) end); + {error, not_found} -> + E() end. 
with(Name, F) -> -- cgit v1.2.1 From 990c53d772565fc6967b1cad17587bcc1e82b153 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 11:25:58 +0100 Subject: Add a sleep, to avoid tight spinning --- src/rabbit_amqqueue.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 534d1002..8c374ef3 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -259,6 +259,7 @@ with(Name, F, E) -> {ok, Q = #amqqueue{mirror_pids = []}} -> rabbit_misc:with_exit_handler(E, fun () -> F(Q) end); {ok, Q} -> + timer:sleep(25), E1 = fun () -> with(Name, F, E) end, rabbit_misc:with_exit_handler(E1, fun () -> F(Q) end); {error, not_found} -> -- cgit v1.2.1 From 9e744ff212999ee6e4244504ffd4878334c7846a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 12:12:48 +0100 Subject: Ensure that when a slave gets promoted, it requeues msgs in the same order which they were fetched --- src/rabbit_mirror_queue_slave.erl | 40 ++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index cceb67e2..052078bd 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -60,6 +60,7 @@ sender_queues, %% :: Pid -> MsgQ msg_id_ack, %% :: MsgId -> AckTag + ack_num, msg_id_status }). @@ -108,6 +109,8 @@ init([#amqqueue { name = QueueName } = Q]) -> sender_queues = dict:new(), msg_id_ack = dict:new(), + ack_num = 0, + msg_id_status = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -456,7 +459,8 @@ promote_me(From, #state { q = Q, MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), - AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], + NumAckTags = [NumAckTag || {_MsgId, NumAckTag} <- dict:to_list(MA)], + AckTags = [AckTag || {_Num, AckTag} <- lists:sort(NumAckTags)], Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), {Delivery, true} <- queue:to_list(PubQ)], QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( @@ -568,7 +572,6 @@ process_instruction( State = #state { sender_queues = SQ, backing_queue = BQ, backing_queue_state = BQS, - msg_id_ack = MA, msg_id_status = MS }) -> %% We really are going to do the publish right now, even though we @@ -628,12 +631,8 @@ process_instruction( {true, AckRequired} -> {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), - MA1 = case AckRequired of - true -> dict:store(MsgId, AckTag, MA); - false -> MA - end, - State1 #state { backing_queue_state = BQS1, - msg_id_ack = MA1 } + maybe_store_ack(AckRequired, MsgId, AckTag, + State1 #state { backing_queue_state = BQS1 }) end}; process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, State = #state { sender_queues = SQ, @@ -688,19 +687,14 @@ process_instruction({set_length, Length}, end}; process_instruction({fetch, AckRequired, MsgId, Remaining}, State = #state { backing_queue = BQ, - backing_queue_state = BQS, - msg_id_ack = MA }) -> + backing_queue_state = BQS }) -> QLen = BQ:len(BQS), {ok, case QLen - 1 of Remaining -> {{_Msg, _IsDelivered, AckTag, Remaining}, BQS1} = BQ:fetch(AckRequired, BQS), - MA1 = case AckRequired of - true -> dict:store(MsgId, AckTag, MA); - false -> MA - end, - State #state { backing_queue_state = BQS1, - msg_id_ack = MA1 }; + maybe_store_ack(AckRequired, MsgId, AckTag, + State #state { 
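The promotion path above replays pending acks in the order the messages were originally fetched, which is why each acktag is stored under its msg id together with a monotonically increasing number. A sketch of recovering that order from the dict (standalone function, illustrative name):

    acktags_in_fetch_order(MsgIdAck) ->
        NumAckTags = [NumAckTag
                      || {_MsgId, NumAckTag} <- dict:to_list(MsgIdAck)],
        [AckTag || {_Num, AckTag} <- lists:sort(NumAckTags)].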
backing_queue_state = BQS1 }); Other when Other < Remaining -> %% we must be shorter than the master State @@ -744,11 +738,19 @@ msg_ids_to_acktags(MsgIds, MA) -> lists:foldl( fun (MsgId, {Acc, MAN}) -> case dict:find(MsgId, MA) of - error -> {Acc, MAN}; - {ok, AckTag} -> {[AckTag | Acc], dict:erase(MsgId, MAN)} + error -> {Acc, MAN}; + {ok, {_Num, AckTag}} -> {[AckTag | Acc], + dict:erase(MsgId, MAN)} end end, {[], MA}, MsgIds), {lists:reverse(AckTags), MA1}. ack_all(BQ, MA, BQS) -> - BQ:ack([AckTag || {_MsgId, AckTag} <- dict:to_list(MA)], BQS). + BQ:ack([AckTag || {_MsgId, {_Num, AckTag}} <- dict:to_list(MA)], BQS). + +maybe_store_ack(false, _MsgId, _AckTag, State) -> + State; +maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA, + ack_num = Num }) -> + State #state { msg_id_ack = dict:store(MsgId, {Num, AckTag}, MA), + ack_num = Num + 1 }. -- cgit v1.2.1 From 26b59ab8d2e38fab17d45470d17091ced7afca2a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 12:28:39 +0100 Subject: Add upgrade step to add the mirror pids to the queue --- src/rabbit_upgrade_functions.erl | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 31bbb929..325156b1 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -28,6 +28,7 @@ -rabbit_upgrade({topic_trie, mnesia, []}). -rabbit_upgrade({semi_durable_route, mnesia, []}). -rabbit_upgrade({exchange_event_serial, mnesia, []}). +-rabbit_upgrade({mirror_pids, mnesia, []}). %% ------------------------------------------------------------------- @@ -41,6 +42,7 @@ -spec(topic_trie/0 :: () -> 'ok'). -spec(exchange_event_serial/0 :: () -> 'ok'). -spec(semi_durable_route/0 :: () -> 'ok'). +-spec(mirror_pids/0 :: () -> 'ok'). -endif. @@ -113,6 +115,19 @@ exchange_event_serial() -> create(rabbit_exchange_serial, [{record_name, exchange_serial}, {attributes, [name, next]}]). +mirror_pids() -> + Tables = [rabbit_queue, rabbit_durable_queue], + AddMirrorPidsFun = + fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) -> + {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid, []} + end, + [ ok = transform(T, + AddMirrorPidsFun, + [name, durable, auto_delete, exclusive_owner, arguments, + pid, mirror_pids]) + || T <- Tables ], + ok. + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 86da4d3fdb023363d52eec0d95fd823d707ce29c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 16:40:01 +0100 Subject: Eliminate a race which was found to allow promotion to be non-atomic --- src/rabbit_mirror_queue_misc.erl | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index bf341c74..5f180c5e 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -20,6 +20,11 @@ -include("rabbit.hrl"). +%% If the dead pids include the queue pid (i.e. the master has died) +%% then only remove that if we are about to be promoted. Otherwise we +%% can have the situation where a slave updates the mnesia record for +%% a queue, promoting another slave before that slave realises it has +%% become the new master. 
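The mirror_pids upgrade above appends a default (empty) mirror list to every stored amqqueue record and extends the attribute list to match. A sketch of the same transformation applied directly with mnesia:transform_table/3, which is presumably what the transform/3 helper wraps:

    add_mirror_pids(Table) ->
        Fun = fun ({amqqueue, Name, Durable, AutoDelete, Owner, Args, Pid}) ->
                      {amqqueue, Name, Durable, AutoDelete, Owner, Args,
                       Pid, []}
              end,
        {atomic, ok} = mnesia:transform_table(
                         Table, Fun, [name, durable, auto_delete,
                                      exclusive_owner, arguments,
                                      pid, mirror_pids]),
        ok.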
remove_from_queue(QueueName, DeadPids) -> DeadNodes = [node(DeadPid) || DeadPid <- DeadPids], rabbit_misc:execute_mnesia_transaction( @@ -35,13 +40,22 @@ remove_from_queue(QueueName, DeadPids) -> not lists:member(node(Pid), DeadNodes)], case {{QPid, MPids}, {QPid1, MPids1}} of {Same, Same} -> - {ok, QPid}; - _ -> + ok; + _ when QPid =:= QPid1 orelse node(QPid1) =:= node() -> + %% Either master hasn't changed, so + %% we're ok to update mnesia; or master + %% has changed to become us! Q1 = Q #amqqueue { pid = QPid1, mirror_pids = MPids1 }, - ok = rabbit_amqqueue:store_queue(Q1), - {ok, QPid1} - end + ok = rabbit_amqqueue:store_queue(Q1); + _ -> + %% Master has changed, and we're not it, + %% so leave alone to allow the promoted + %% slave to find it and make its + %% promotion atomic. + ok + end, + {ok, QPid1} end end). -- cgit v1.2.1 From 76bf983a059a1f431be1452896baed32b8eef4bf Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 17 May 2011 17:29:38 +0100 Subject: Improve logging information --- src/rabbit_mirror_queue_coordinator.erl | 3 ++- src/rabbit_mirror_queue_slave.erl | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 05e4a808..729749dc 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -257,7 +257,8 @@ handle_call(get_gm, _From, State = #state { gm = GM }) -> handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> rabbit_log:info("Master ~p saw deaths ~p for ~s~n", - [self(), Deaths, rabbit_misc:rs(QueueName)]), + [self(), [{Pid, node(Pid)} || Pid <- Deaths], + rabbit_misc:rs(QueueName)]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= node() -> noreply(State); diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 052078bd..fdf9d9bc 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -143,7 +143,8 @@ handle_call({gm_deaths, Deaths}, From, gm = GM, master_node = MNode }) -> rabbit_log:info("Slave ~p saw deaths ~p for ~s~n", - [self(), Deaths, rabbit_misc:rs(QueueName)]), + [self(), [{Pid, node(Pid)} || Pid <- Deaths], + rabbit_misc:rs(QueueName)]), %% The GM has told us about deaths, which means we're not going to %% receive any more messages from GM case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of -- cgit v1.2.1 From 6d9e82abd7b035a6e43dfd8ee7dc8d7289843b60 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 18 May 2011 12:42:47 +0100 Subject: Do not read the msg surviving dropwhile1 --- src/rabbit_variable_queue.erl | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7a3c17a2..d5533e42 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -565,19 +565,23 @@ dropwhile(Pred, State) -> dropwhile1(Pred, State) -> internal_queue_out( - fun(MsgStatus = #msg_status { msg_props = MsgProps }, State1) -> + fun(MsgStatus = #msg_status { msg_props = MsgProps, msg = Msg, + index_on_disk = IndexOnDisk }, + State1 = #vqstate { q3 = Q3, q4 = Q4 }) -> case Pred(MsgProps) of true -> {_, State2} = internal_fetch(false, MsgStatus, State1), dropwhile1(Pred, State2); false -> - %% message needs to go back into Q4 (or maybe go - %% in for the first time if it was loaded from - %% Q3). 
Also the msg contents might not be in - %% RAM, so read them in now - {MsgStatus1, State2 = #vqstate { q4 = Q4 }} = - read_msg(MsgStatus, State1), - {ok, State2 #vqstate {q4 = queue:in_r(MsgStatus1, Q4) }} + case Msg of + undefined -> + true = queue:is_empty(Q4), %% ASSERTION + Q3a = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + {ok, State1 #vqstate { q3 = Q3a }}; + _ -> + Q4a = queue:in_r(MsgStatus, Q4), + {ok, State1 #vqstate { q4 = Q4a }} + end end end, State). -- cgit v1.2.1 From 56ffe638d84646c2808b2b7a3bda0bae6a5ed933 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 18 May 2011 13:30:32 +0100 Subject: BQ:needs_idle_timeout :: State -> Bool ==> BQ:needs_timeout :: State -> (false | idle | timed); which better reflects the different needs --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_amqqueue_process.erl | 7 ++++--- src/rabbit_backing_queue.erl | 8 ++++---- src/rabbit_tests.erl | 8 ++++---- src/rabbit_variable_queue.erl | 30 ++++++++++++++++-------------- 5 files changed, 29 insertions(+), 26 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index d9296bf6..f43baf0d 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -66,7 +66,7 @@ -spec(set_ram_duration_target/2 :: (('undefined' | 'infinity' | number()), state()) -> state()). -spec(ram_duration/1 :: (state()) -> {number(), state()}). --spec(needs_idle_timeout/1 :: (state()) -> boolean()). +-spec(needs_timeout/1 :: (state()) -> 'false' | 'timed' | 'idle'). -spec(idle_timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 110817a9..6a9e6575 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -220,9 +220,10 @@ next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> ensure_rate_timer( confirm_messages(MsgIds, State#q{ backing_queue_state = BQS1}))), - case BQ:needs_idle_timeout(BQS1) of - true -> {ensure_sync_timer(State1), 0}; - false -> {stop_sync_timer(State1), hibernate} + case BQ:needs_timeout(BQS1) of + false -> {stop_sync_timer(State1), hibernate}; + idle -> {stop_sync_timer(State1), 0 }; + timed -> {ensure_sync_timer(State1), 0 } end. ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 0955a080..293b5655 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -152,11 +152,11 @@ behaviour_info(callbacks) -> %% Should 'idle_timeout' be called as soon as the queue process %% can manage (either on an empty mailbox, or when a timer %% fires)? - {needs_idle_timeout, 1}, + {needs_timeout, 1}, - %% Called (eventually) after needs_idle_timeout returns - %% 'true'. Note this may be called more than once for each 'true' - %% returned from needs_idle_timeout. + %% Called (eventually) after needs_timeout returns 'idle' or + %% 'timed'. Note this may be called more than once for each + %% 'idle' or 'timed' returned from needs_timeout. {idle_timeout, 1}, %% Called immediately before the queue hibernates. diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 3726420d..5137cce1 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2269,10 +2269,10 @@ check_variable_queue_status(VQ0, Props) -> VQ1. 
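Under the three-valued contract above, 'timed' asks the queue process to keep its sync timer running, while 'idle' only asks for a callback once the mailbox drains and 'false' lets it hibernate. A hypothetical outline of a backing queue honouring that contract, with made-up predicate helpers standing in for the real checks:

    needs_timeout(State) ->
        case has_pending_syncs(State) of                  %% hypothetical
            true  -> timed;
            false -> case has_reducible_memory(State) of  %% hypothetical
                         true  -> idle;
                         false -> false
                     end
        end.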
variable_queue_wait_for_shuffling_end(VQ) -> - case rabbit_variable_queue:needs_idle_timeout(VQ) of - true -> variable_queue_wait_for_shuffling_end( - rabbit_variable_queue:idle_timeout(VQ)); - false -> VQ + case rabbit_variable_queue:needs_timeout(VQ) of + false -> VQ; + _ -> variable_queue_wait_for_shuffling_end( + rabbit_variable_queue:idle_timeout(VQ)) end. test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 7a3c17a2..8e3cbada 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -21,7 +21,7 @@ fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, - needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, + needs_timeout/1, idle_timeout/1, handle_pre_hibernate/1, status/1, invoke/3, is_duplicate/3, discard/3, multiple_routing_keys/0]). @@ -830,19 +830,21 @@ ram_duration(State = #vqstate { ram_msg_count_prev = RamMsgCount, ram_ack_count_prev = RamAckCount }}. -needs_idle_timeout(State = #vqstate { on_sync = OnSync }) -> - case {OnSync, needs_index_sync(State)} of - {?BLANK_SYNC, false} -> - {Res, _State} = reduce_memory_use( - fun (_Quota, State1) -> {0, State1} end, - fun (_Quota, State1) -> State1 end, - fun (State1) -> State1 end, - fun (_Quota, State1) -> {0, State1} end, - State), - Res; - _ -> - true - end. + +needs_timeout(State = #vqstate { on_sync = ?BLANK_SYNC }) -> + case needs_index_sync(State) of + true -> timed; + false -> case reduce_memory_use(fun (_Quota, State1) -> {0, State1} end, + fun (_Quota, State1) -> State1 end, + fun (State1) -> State1 end, + fun (_Quota, State1) -> {0, State1} end, + State) of + {true, _State} -> idle; + {false, _State} -> false + end + end; +needs_timeout(_State) -> + timed. idle_timeout(State) -> a(reduce_memory_use(confirm_commit_index(tx_commit_index(State)))). -- cgit v1.2.1 From ce8a486e6e8fd08e78d696d0e31dd67fd0b5b670 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 18 May 2011 13:35:41 +0100 Subject: Well it's no shorter, but it closer matches the original --- src/rabbit_variable_queue.erl | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 8e3cbada..df4b9c49 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -831,20 +831,20 @@ ram_duration(State = #vqstate { ram_ack_count_prev = RamAckCount }}. -needs_timeout(State = #vqstate { on_sync = ?BLANK_SYNC }) -> - case needs_index_sync(State) of - true -> timed; - false -> case reduce_memory_use(fun (_Quota, State1) -> {0, State1} end, - fun (_Quota, State1) -> State1 end, - fun (State1) -> State1 end, - fun (_Quota, State1) -> {0, State1} end, - State) of - {true, _State} -> idle; - {false, _State} -> false - end - end; -needs_timeout(_State) -> - timed. +needs_timeout(State = #vqstate { on_sync = OnSync }) -> + case {OnSync, needs_index_sync(State)} of + {?BLANK_SYNC, false} -> + case reduce_memory_use(fun (_Quota, State1) -> {0, State1} end, + fun (_Quota, State1) -> State1 end, + fun (State1) -> State1 end, + fun (_Quota, State1) -> {0, State1} end, + State) of + {true, _State} -> idle; + {false, _State} -> false + end; + _ -> + timed + end. idle_timeout(State) -> a(reduce_memory_use(confirm_commit_index(tx_commit_index(State)))). 
-- cgit v1.2.1 From e73056d450d37065004deb1e7a0eaca1b0973ad1 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 18 May 2011 13:36:50 +0100 Subject: whitespace --- src/rabbit_variable_queue.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index df4b9c49..01f69712 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -830,7 +830,6 @@ ram_duration(State = #vqstate { ram_msg_count_prev = RamMsgCount, ram_ack_count_prev = RamAckCount }}. - needs_timeout(State = #vqstate { on_sync = OnSync }) -> case {OnSync, needs_index_sync(State)} of {?BLANK_SYNC, false} -> -- cgit v1.2.1 From dd284ed329fe2a0888d5a0df23fd7c5fcff8ba85 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 18 May 2011 15:43:51 +0100 Subject: And now, after testing it, actually make it work by correcting one of the counters... --- src/rabbit_variable_queue.erl | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index d5533e42..8998c0e8 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -567,21 +567,26 @@ dropwhile1(Pred, State) -> internal_queue_out( fun(MsgStatus = #msg_status { msg_props = MsgProps, msg = Msg, index_on_disk = IndexOnDisk }, - State1 = #vqstate { q3 = Q3, q4 = Q4 }) -> + State1 = #vqstate { q3 = Q3, q4 = Q4, + ram_index_count = RamIndexCount }) -> case Pred(MsgProps) of true -> {_, State2} = internal_fetch(false, MsgStatus, State1), dropwhile1(Pred, State2); false -> - case Msg of - undefined -> - true = queue:is_empty(Q4), %% ASSERTION - Q3a = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), - {ok, State1 #vqstate { q3 = Q3a }}; - _ -> - Q4a = queue:in_r(MsgStatus, Q4), - {ok, State1 #vqstate { q4 = Q4a }} - end + {ok, + case Msg of + undefined -> + true = queue:is_empty(Q4), %% ASSERTION + Q3a = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + RamIndexCount1 = + RamIndexCount + one_if(not IndexOnDisk), + State1 #vqstate { + q3 = Q3a, ram_index_count = RamIndexCount1 }; + _ -> + Q4a = queue:in_r(MsgStatus, Q4), + State1 #vqstate { q4 = Q4a } + end} end end, State). -- cgit v1.2.1 From 19bec198d96967a7d16fc6ac4feba6898b5ff426 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 18 May 2011 17:54:24 +0100 Subject: Only check the application environment when a channel starts up. --- src/rabbit_channel.erl | 26 +++++++++++++++----------- src/rabbit_trace.erl | 29 +++++++++++++++++++---------- 2 files changed, 34 insertions(+), 21 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index f0788862..2517528a 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -35,7 +35,7 @@ user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq, - unconfirmed_qm, confirmed, capabilities}). + unconfirmed_qm, confirmed, capabilities, trace_state}). -define(MAX_PERMISSION_CACHE_SIZE, 12). 
@@ -185,7 +185,8 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, unconfirmed_mq = gb_trees:empty(), unconfirmed_qm = gb_trees:empty(), confirmed = [], - capabilities = Capabilities}, + capabilities = Capabilities, + trace_state = rabbit_trace:init(VHost)}, rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)), rabbit_event:if_enabled(StatsTimer, fun() -> internal_emit_stats(State) end), @@ -263,8 +264,9 @@ handle_cast({deliver, ConsumerTag, AckRequired, #basic_message{exchange_name = ExchangeName, routing_keys = [RoutingKey | _CcRoutes], content = Content}}}, - State = #ch{writer_pid = WriterPid, - next_tag = DeliveryTag}) -> + State = #ch{writer_pid = WriterPid, + next_tag = DeliveryTag, + trace_state = TraceState}) -> State1 = lock_message(AckRequired, ack_record(DeliveryTag, ConsumerTag, Msg), State), @@ -281,7 +283,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - rabbit_trace:tap_trace_out(Msg), + rabbit_trace:tap_trace_out(Msg, TraceState), noreply(State1#ch{next_tag = DeliveryTag + 1}); handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) -> @@ -591,7 +593,8 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, transaction_id = TxnKey, - confirm_enabled = ConfirmEnabled}) -> + confirm_enabled = ConfirmEnabled, + trace_state = TraceState}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), check_write_permitted(ExchangeName, State), Exchange = rabbit_exchange:lookup_or_die(ExchangeName), @@ -608,7 +611,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> - rabbit_trace:tap_trace_in(Message), + rabbit_trace:tap_trace_in(Message, TraceState), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( Exchange, @@ -656,9 +659,10 @@ handle_method(#'basic.ack'{delivery_tag = DeliveryTag, handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, - _, State = #ch{writer_pid = WriterPid, - conn_pid = ConnPid, - next_tag = DeliveryTag}) -> + _, State = #ch{writer_pid = WriterPid, + conn_pid = ConnPid, + next_tag = DeliveryTag, + trace_state = TraceState}) -> QueueName = expand_queue_name_shortcut(QueueNameBin, State), check_read_permitted(QueueName, State), case rabbit_amqqueue:with_exclusive_access_or_die( @@ -677,7 +681,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), - rabbit_trace:tap_trace_out(Msg), + rabbit_trace:tap_trace_out(Msg, TraceState), ok = rabbit_writer:send_command( WriterPid, #'basic.get_ok'{delivery_tag = DeliveryTag, diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 2d15e7fc..e24d22ad 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,39 +16,48 @@ -module(rabbit_trace). --export([tap_trace_in/1, tap_trace_out/1]). +-export([init/1, tap_trace_in/2, tap_trace_out/2]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). +-record(trace_state, {trace_exchange}). + %%---------------------------------------------------------------------------- -ifdef(use_specs). --spec(tap_trace_in/1 :: (rabbit_types:basic_message()) -> 'ok'). --spec(tap_trace_out/1 :: (rabbit_amqqueue:qmsg()) -> 'ok'). +-type(state() :: #trace_state{trace_exchange :: rabbit_exchange:name()}). + +-spec(init/1 :: (rabbit_types:vhost()) -> state()). 
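With the API above, the trace state is resolved from the vhost once (at channel start) and then threaded through the per-message tap calls. A hypothetical round-trip sketch, where Msg is a #basic_message{} and QMsg a delivered-message tuple as in the specs:

    trace_both(VHost, Msg, QMsg) ->
        TraceState = rabbit_trace:init(VHost),
        ok = rabbit_trace:tap_trace_in(Msg, TraceState),
        ok = rabbit_trace:tap_trace_out(QMsg, TraceState),
        TraceState.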
+-spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). +-spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok'). -endif. %%---------------------------------------------------------------------------- -tap_trace_in(Msg) -> - maybe_trace(Msg, <<"publish">>, xname(Msg), []). +init(VHost) -> + #trace_state{trace_exchange = trace_exchange(VHost)}. + +tap_trace_in(Msg, #trace_state{trace_exchange = TraceX}) -> + maybe_trace(Msg, TraceX, <<"publish">>, xname(Msg), []). -tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}) -> +tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, + #trace_state{trace_exchange = TraceX}) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, <<"deliver">>, QName, + maybe_trace(Msg, TraceX, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. -maybe_trace(Msg, RKPrefix, RKSuffix, Extra) -> +maybe_trace(Msg, TraceX, RKPrefix, RKSuffix, Extra) -> XName = xname(Msg), - case trace_exchange(vhost(Msg)) of + case TraceX of none -> ok; XName -> ok; - TraceX -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, Extra) of + _ -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, Extra) of {'EXIT', R} -> rabbit_log:info("Trace died: ~p~n", [R]); ok -> ok end -- cgit v1.2.1 From 1e2c482d34e80270518e58311105772763df730e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 10:53:01 +0100 Subject: Refactor a bit and push more work "below" checking the state. This reduces the cost from 2.3% to 1.1%. --- src/rabbit_trace.erl | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index e24d22ad..d0fc4a39 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -21,13 +21,11 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --record(trace_state, {trace_exchange}). - %%---------------------------------------------------------------------------- -ifdef(use_specs). --type(state() :: #trace_state{trace_exchange :: rabbit_exchange:name()}). +-type(state() :: rabbit_exchange:name()). -spec(init/1 :: (rabbit_types:vhost()) -> state()). -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). @@ -38,29 +36,32 @@ %%---------------------------------------------------------------------------- init(VHost) -> - #trace_state{trace_exchange = trace_exchange(VHost)}. + trace_exchange(VHost). -tap_trace_in(Msg, #trace_state{trace_exchange = TraceX}) -> - maybe_trace(Msg, TraceX, <<"publish">>, xname(Msg), []). +tap_trace_in(Msg, TraceX) -> + maybe_trace(Msg, TraceX, publish, []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, - #trace_state{trace_exchange = TraceX}) -> + TraceX) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, TraceX, <<"deliver">>, QName, + maybe_trace(Msg, TraceX, {deliver, QName}, [{<<"redelivered">>, signedint, RedeliveredNum}]). xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. 
-maybe_trace(Msg, TraceX, RKPrefix, RKSuffix, Extra) -> - XName = xname(Msg), +maybe_trace(Msg, TraceX, Mode, Extra) -> case TraceX of - none -> ok; - XName -> ok; - _ -> case catch trace(TraceX, Msg, RKPrefix, RKSuffix, Extra) of - {'EXIT', R} -> rabbit_log:info("Trace died: ~p~n", [R]); - ok -> ok - end + none -> ok; + _ -> X = xname(Msg), + case TraceX of + X -> ok; + _ -> case catch trace(TraceX, Msg, Mode, Extra) of + {'EXIT', R} -> rabbit_log:info( + "Trace died: ~p~n", [R]); + ok -> ok + end + end end. trace_exchange(VHost) -> @@ -69,8 +70,12 @@ trace_exchange(VHost) -> {ok, Xs} -> proplists:get_value(VHost, Xs, none) end. -trace(TraceX, Msg0, RKPrefix, RKSuffix, Extra) -> +trace(TraceX, Msg0, Mode, Extra) -> Msg = ensure_content_decoded(Msg0), + {RKPrefix, RKSuffix} = case Mode of + publish -> {<<"publish">>, xname(Msg0)}; + {deliver, Q} -> {<<"deliver">>, Q} + end, rabbit_basic:publish(rabbit_misc:r(vhost(Msg), exchange, TraceX), <>, #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, -- cgit v1.2.1 From a0588a3cdf4f5c56ea7d1c845c21ecdeaa0ae76f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 12:04:27 +0100 Subject: rabbitmqctl refresh_channel_config --- docs/rabbitmqctl.1.xml | 8 ++++++++ src/rabbit_channel.erl | 11 ++++++++++- src/rabbit_control.erl | 4 ++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 62869158..24228f41 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1323,6 +1323,14 @@ + + refresh_channel_config + + + Tell all channels to refresh their configuration (currently only the value of trace_exchanges). + + + diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 2517528a..301efb43 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -23,7 +23,7 @@ -export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). --export([emit_stats/1, ready_for_close/1]). +-export([emit_stats/1, ready_for_close/1, refresh_config_all/0]). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, @@ -89,6 +89,7 @@ -spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()). -spec(info_all/0 :: () -> [rabbit_types:infos()]). -spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). +-spec(refresh_config_all/0 :: () -> 'ok'). -spec(emit_stats/1 :: (pid()) -> 'ok'). -spec(ready_for_close/1 :: (pid()) -> 'ok'). @@ -146,6 +147,11 @@ info_all() -> info_all(Items) -> rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()). +refresh_config_all() -> + rabbit_misc:upmap( + fun (C) -> gen_server2:call(C, refresh_config) end, list()), + ok. + emit_stats(Pid) -> gen_server2:cast(Pid, emit_stats). @@ -219,6 +225,9 @@ handle_call({info, Items}, _From, State) -> catch Error -> reply({error, Error}, State) end; +handle_call(refresh_config, _From, State = #ch{virtual_host = VHost}) -> + reply(ok, State#ch{trace_state = rabbit_trace:init(VHost)}); + handle_call(_Request, _From, State) -> noreply(State). 
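A minimal sketch of driving the new broadcast by hand from an Erlang shell attached to the broker node, assuming an example vhost <<"/">> and an existing exchange named <<"tracer">> (both names are illustrative, not taken from the patch); the rabbit_control.erl hunk that follows exposes the same broadcast as a rabbitmqctl command:

    %% Illustrative names only: point tracing for vhost <<"/">> at exchange
    %% <<"tracer">>, then have every live channel rebuild its cached
    %% #ch.trace_state by re-running rabbit_trace:init/1.
    application:set_env(rabbit, trace_exchanges, [{<<"/">>, <<"tracer">>}]),
    rabbit_channel:refresh_config_all().
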
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 6ab07111..6966fd04 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -295,6 +295,10 @@ action(unset_env, Node, [Var], _Opts, Inform) -> Inform("Clearing control variable ~s for node ~p", [Var, Node]), rpc_call(Node, application, unset_env, [rabbit, parse(Var)]); +action(refresh_channel_config, Node, [], _Opts, Inform) -> + Inform("Telling channels to refresh configuration", []), + rpc_call(Node, rabbit_channel, refresh_config_all, []); + action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Setting permissions for user ~p in vhost ~p", [Username, VHost]), -- cgit v1.2.1 From 0cc380824778e2305013efe764effd36a809b36e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 12:30:16 +0100 Subject: Rename --- docs/rabbitmqctl.1.xml | 4 ++-- src/rabbit_control.erl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 24228f41..0719bf3b 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1324,10 +1324,10 @@ - refresh_channel_config + refresh_config - Tell all channels to refresh their configuration (currently only the value of trace_exchanges). + Tell running RabbitMQ processes to refresh their configuration (currently only the value of trace_exchanges). diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 6966fd04..94169969 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -295,8 +295,8 @@ action(unset_env, Node, [Var], _Opts, Inform) -> Inform("Clearing control variable ~s for node ~p", [Var, Node]), rpc_call(Node, application, unset_env, [rabbit, parse(Var)]); -action(refresh_channel_config, Node, [], _Opts, Inform) -> - Inform("Telling channels to refresh configuration", []), +action(refresh_config, Node, [], _Opts, Inform) -> + Inform("Refreshing configuration", []), rpc_call(Node, rabbit_channel, refresh_config_all, []); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> -- cgit v1.2.1 From e810f74607e8fd722d2df87334603db4672ab4f2 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 12:41:22 +0100 Subject: Fold refresh_config in with (un)set_env, abstract into rabbit. --- docs/rabbitmqctl.1.xml | 8 -------- src/rabbit.erl | 16 ++++++++++++++++ src/rabbit_control.erl | 10 +++------- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 0719bf3b..62869158 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1323,14 +1323,6 @@ - - refresh_config - - - Tell running RabbitMQ processes to refresh their configuration (currently only the value of trace_exchanges). - - - diff --git a/src/rabbit.erl b/src/rabbit.erl index e6e80b4a..bbe98789 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -25,6 +25,8 @@ -export([log_location/1]). +-export([get_env/1, set_env/2, unset_env/1]). + %%--------------------------------------------------------------------------- %% Boot steps. -export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). @@ -186,6 +188,9 @@ -spec(maybe_insert_default_data/0 :: () -> 'ok'). -spec(boot_delegate/0 :: () -> 'ok'). -spec(recover/0 :: () -> 'ok'). +-spec(get_env/1 :: (atom()) -> any()). +-spec(set_env/2 :: (atom(), any()) -> 'ok'). +-spec(unset_env/1 :: (atom()) -> 'ok'). -endif. 
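At this point in the series tracing is still switched through the generic environment commands; the rabbit.erl hunk that follows implements the rabbit:set_env/get_env/unset_env wrappers they now delegate to, and set_env/unset_env additionally trigger the channel refresh broadcast. An illustrative invocation, with example vhost and exchange names and shell quoting that may need adjusting:

    rabbitmqctl set_env trace_exchanges '[{<<"/">>, <<"tracer">>}]'
    rabbitmqctl get_env trace_exchanges
    rabbitmqctl unset_env trace_exchanges
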
@@ -514,3 +519,14 @@ log_rotation_result(ok, {error, SaslLogError}) -> {error, {cannot_rotate_sasl_logs, SaslLogError}}; log_rotation_result(ok, ok) -> ok. + +get_env(Key) -> + application:get_env(rabbit, Key). + +set_env(Key, Value) -> + application:set_env(rabbit, Key, Value), + rabbit_channel:refresh_config_all(). + +unset_env(Key) -> + application:unset_env(rabbit, Key), + rabbit_channel:refresh_config_all(). diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 94169969..5a013711 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -284,20 +284,16 @@ action(list_consumers, Node, _Args, Opts, Inform) -> action(set_env, Node, [Var, Term], _Opts, Inform) -> Inform("Setting control variable ~s for node ~p to ~s", [Var, Node, Term]), - rpc_call(Node, application, set_env, [rabbit, parse(Var), parse(Term)]); + rpc_call(Node, rabbit, set_env, [parse(Var), parse(Term)]); action(get_env, Node, [Var], _Opts, Inform) -> Inform("Getting control variable ~s for node ~p", [Var, Node]), - Val = rpc_call(Node, application, get_env, [rabbit, parse(Var)]), + Val = rpc_call(Node, rabbit, get_env, [parse(Var)]), io:format("~p~n", [Val]); action(unset_env, Node, [Var], _Opts, Inform) -> Inform("Clearing control variable ~s for node ~p", [Var, Node]), - rpc_call(Node, application, unset_env, [rabbit, parse(Var)]); - -action(refresh_config, Node, [], _Opts, Inform) -> - Inform("Refreshing configuration", []), - rpc_call(Node, rabbit_channel, refresh_config_all, []); + rpc_call(Node, rabbit, unset_env, [parse(Var)]); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), -- cgit v1.2.1 From 2d318b334722df94ef5b18811c37200e724ea63c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 19 May 2011 13:32:09 +0100 Subject: BQ:idle_timeout => BQ:timeout --- include/rabbit_backing_queue_spec.hrl | 2 +- src/rabbit_amqqueue_process.erl | 8 ++++---- src/rabbit_backing_queue.erl | 4 ++-- src/rabbit_tests.erl | 4 ++-- src/rabbit_variable_queue.erl | 6 +++--- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index f43baf0d..1c2b94e2 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -67,7 +67,7 @@ (('undefined' | 'infinity' | number()), state()) -> state()). -spec(ram_duration/1 :: (state()) -> {number(), state()}). -spec(needs_timeout/1 :: (state()) -> 'false' | 'timed' | 'idle'). --spec(idle_timeout/1 :: (state()) -> state()). +-spec(timeout/1 :: (state()) -> state()). -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6a9e6575..8091e2c2 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -662,8 +662,8 @@ maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg). qname(#q{q = #amqqueue{name = QName}}) -> QName. -backing_queue_idle_timeout(State = #q{backing_queue = BQ}) -> - run_backing_queue(BQ, fun (M, BQS) -> M:idle_timeout(BQS) end, State). +backing_queue_timeout(State = #q{backing_queue = BQ}) -> + run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State). 
run_backing_queue(Mod, Fun, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> @@ -1047,7 +1047,7 @@ handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); handle_cast(sync_timeout, State) -> - noreply(backing_queue_idle_timeout(State#q{sync_timer_ref = undefined})); + noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined})); handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. @@ -1181,7 +1181,7 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) -> end; handle_info(timeout, State) -> - noreply(backing_queue_idle_timeout(State)); + noreply(backing_queue_timeout(State)); handle_info({'EXIT', _Pid, Reason}, State) -> {stop, Reason, State}; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 293b5655..addaabc5 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -149,7 +149,7 @@ behaviour_info(callbacks) -> %% queue. {ram_duration, 1}, - %% Should 'idle_timeout' be called as soon as the queue process + %% Should 'timeout' be called as soon as the queue process %% can manage (either on an empty mailbox, or when a timer %% fires)? {needs_timeout, 1}, @@ -157,7 +157,7 @@ behaviour_info(callbacks) -> %% Called (eventually) after needs_timeout returns 'idle' or %% 'timed'. Note this may be called more than once for each %% 'idle' or 'timed' returned from needs_timeout. - {idle_timeout, 1}, + {timeout, 1}, %% Called immediately before the queue hibernates. {handle_pre_hibernate, 1}, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 5137cce1..1a37cdff 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2272,7 +2272,7 @@ variable_queue_wait_for_shuffling_end(VQ) -> case rabbit_variable_queue:needs_timeout(VQ) of false -> VQ; _ -> variable_queue_wait_for_shuffling_end( - rabbit_variable_queue:idle_timeout(VQ)) + rabbit_variable_queue:timeout(VQ)) end. test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> @@ -2300,7 +2300,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2), {_Guids, VQ4} = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), - VQ5 = rabbit_variable_queue:idle_timeout(VQ4), + VQ5 = rabbit_variable_queue:timeout(VQ4), _VQ6 = rabbit_variable_queue:terminate(VQ5), VQ7 = variable_queue_init(test_amqqueue(true), true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 01f69712..3361e552 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -21,7 +21,7 @@ fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, - needs_timeout/1, idle_timeout/1, handle_pre_hibernate/1, + needs_timeout/1, timeout/1, handle_pre_hibernate/1, status/1, invoke/3, is_duplicate/3, discard/3, multiple_routing_keys/0]). @@ -146,7 +146,7 @@ %% any one time. This further smooths the effects of changes to the %% target_ram_count and ensures the queue remains responsive %% even when there is a large amount of IO work to do. The -%% idle_timeout callback is utilised to ensure that conversions are +%% timeout callback is utilised to ensure that conversions are %% done as promptly as possible whilst ensuring the queue remains %% responsive. 
%% @@ -845,7 +845,7 @@ needs_timeout(State = #vqstate { on_sync = OnSync }) -> timed end. -idle_timeout(State) -> +timeout(State) -> a(reduce_memory_use(confirm_commit_index(tx_commit_index(State)))). handle_pre_hibernate(State = #vqstate { index_state = IndexState }) -> -- cgit v1.2.1 From 8b75deed4bdcc56c9ab6455b3d150f79cb4386da Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 14:16:30 +0100 Subject: simplify --- src/rabbit_variable_queue.erl | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 8998c0e8..b8771600 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -565,31 +565,24 @@ dropwhile(Pred, State) -> dropwhile1(Pred, State) -> internal_queue_out( - fun(MsgStatus = #msg_status { msg_props = MsgProps, msg = Msg, - index_on_disk = IndexOnDisk }, - State1 = #vqstate { q3 = Q3, q4 = Q4, - ram_index_count = RamIndexCount }) -> + fun(MsgStatus = #msg_status { msg_props = MsgProps }, State1) -> case Pred(MsgProps) of - true -> - {_, State2} = internal_fetch(false, MsgStatus, State1), - dropwhile1(Pred, State2); - false -> - {ok, - case Msg of - undefined -> - true = queue:is_empty(Q4), %% ASSERTION - Q3a = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), - RamIndexCount1 = - RamIndexCount + one_if(not IndexOnDisk), - State1 #vqstate { - q3 = Q3a, ram_index_count = RamIndexCount1 }; - _ -> - Q4a = queue:in_r(MsgStatus, Q4), - State1 #vqstate { q4 = Q4a } - end} + true -> {_, State2} = internal_fetch(false, MsgStatus, + State1), + dropwhile1(Pred, State2); + false -> {ok, in_r(MsgStatus, State1)} end end, State). +in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, + State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> + true = queue:is_empty(Q4), %% ASSERTION + State #vqstate { + q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; +in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> + State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. + fetch(AckRequired, State) -> internal_queue_out( fun(MsgStatus, State1) -> -- cgit v1.2.1 From 3ecee483b94dd4c09875979d11cca3c58bcd89db Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 19 May 2011 15:10:11 +0100 Subject: Make slaves explicitly monitor the master (see comments to come in bug) --- src/rabbit_mirror_queue_slave.erl | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index fdf9d9bc..422b0d59 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -93,6 +93,7 @@ init([#amqqueue { name = QueueName } = Q]) -> write), {ok, QPid} end), + erlang:monitor(process, MPid), ok = file_handle_cache:register_callback( rabbit_amqqueue, set_maximum_since_use, [self()]), ok = rabbit_memory_monitor:register( @@ -149,11 +150,15 @@ handle_call({gm_deaths, Deaths}, From, %% receive any more messages from GM case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= MNode -> + %% master hasn't changed reply(ok, State); {ok, Pid} when node(Pid) =:= node() -> + %% we've become master promote_me(From, State); {ok, Pid} -> + %% master has changed to not us. 
gen_server2:reply(From, ok), + erlang:monitor(process, Pid), ok = gm:broadcast(GM, heartbeat), noreply(State #state { master_node = node(Pid) }); {error, not_found} -> @@ -209,6 +214,11 @@ handle_cast({rollback, _Txn, _ChPid}, State) -> handle_info(timeout, State) -> noreply(backing_queue_idle_timeout(State)); +handle_info({'DOWN', _MonitorRef, process, Pid, _Reason}, + State = #state { gm = GM }) -> + ok = gm:broadcast(GM, {process_death, Pid}), + noreply(State); + handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. @@ -276,6 +286,16 @@ joined([SPid], _Members) -> members_changed([_SPid], _Births, []) -> ok; members_changed([SPid], _Births, Deaths) -> + inform_deaths(SPid, Deaths). + +handle_msg([_SPid], _From, heartbeat) -> + ok; +handle_msg([SPid], _From, {process_death, Pid}) -> + inform_deaths(SPid, [Pid]); +handle_msg([SPid], _From, Msg) -> + ok = gen_server2:cast(SPid, {gm, Msg}). + +inform_deaths(SPid, Deaths) -> rabbit_misc:with_exit_handler( fun () -> {stop, normal} end, fun () -> @@ -287,11 +307,6 @@ members_changed([SPid], _Births, Deaths) -> end end). -handle_msg([_SPid], _From, heartbeat) -> - ok; -handle_msg([SPid], _From, Msg) -> - ok = gen_server2:cast(SPid, {gm, Msg}). - %% --------------------------------------------------------------------------- %% Others %% --------------------------------------------------------------------------- -- cgit v1.2.1 From 862080c059402681e29560a34b264ab5ea8769c3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 19 May 2011 15:57:43 +0100 Subject: Be more assertive. Slave is the equiv of amqqueue_process, so it's ok here to know the message is actually a #basic_message and thus contains an id which can be matched against --- src/rabbit_mirror_queue_slave.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 422b0d59..da01e076 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -707,8 +707,8 @@ process_instruction({fetch, AckRequired, MsgId, Remaining}, QLen = BQ:len(BQS), {ok, case QLen - 1 of Remaining -> - {{_Msg, _IsDelivered, AckTag, Remaining}, BQS1} = - BQ:fetch(AckRequired, BQS), + {{#basic_message{id = MsgId}, _IsDelivered, + AckTag, Remaining}, BQS1} = BQ:fetch(AckRequired, BQS), maybe_store_ack(AckRequired, MsgId, AckTag, State #state { backing_queue_state = BQS1 }); Other when Other < Remaining -> -- cgit v1.2.1 From 531c99685c9f0cbf552aee8fc3416acbaf4b8b1e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 16:05:02 +0100 Subject: Slightly tighter specs, tidy imports. --- src/rabbit.erl | 4 ++-- src/rabbit_channel.erl | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index bbe98789..9052f2f9 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -188,8 +188,8 @@ -spec(maybe_insert_default_data/0 :: () -> 'ok'). -spec(boot_delegate/0 :: () -> 'ok'). -spec(recover/0 :: () -> 'ok'). --spec(get_env/1 :: (atom()) -> any()). --spec(set_env/2 :: (atom(), any()) -> 'ok'). +-spec(get_env/1 :: (atom()) -> term()). +-spec(set_env/2 :: (atom(), term()) -> 'ok'). -spec(unset_env/1 :: (atom()) -> 'ok'). -endif. diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 301efb43..e5e61e56 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -22,8 +22,9 @@ -export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). 
--export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). --export([emit_stats/1, ready_for_close/1, refresh_config_all/0]). +-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1, + refresh_config_all/0]). +-export([emit_stats/1, ready_for_close/1]). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, -- cgit v1.2.1 From 8db3126a4f1a3933f3b3ac7084072362bdd12b57 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 16:25:06 +0100 Subject: Refactor. --- src/rabbit_trace.erl | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index d0fc4a39..6f21e575 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -39,29 +39,33 @@ init(VHost) -> trace_exchange(VHost). tap_trace_in(Msg, TraceX) -> - maybe_trace(Msg, TraceX, publish, []). + maybe_trace(Msg, TraceX, + fun () -> + XName = xname(Msg), + {<<"publish.", XName/binary>>, msg_to_table(Msg)} + end). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) -> - RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, TraceX, {deliver, QName}, - [{<<"redelivered">>, signedint, RedeliveredNum}]). + maybe_trace(Msg, TraceX, + fun () -> + H = {<<"redelivered">>, signedint, + case Redelivered of true -> 1; false -> 0 end}, + {<<"deliver.", QName/binary>>, [H | msg_to_table(Msg)]} + end). xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. -maybe_trace(Msg, TraceX, Mode, Extra) -> +maybe_trace(Msg, none, Fun) -> ok; +maybe_trace(Msg, TraceX, Fun) -> + X = xname(Msg), case TraceX of - none -> ok; - _ -> X = xname(Msg), - case TraceX of - X -> ok; - _ -> case catch trace(TraceX, Msg, Mode, Extra) of - {'EXIT', R} -> rabbit_log:info( - "Trace died: ~p~n", [R]); - ok -> ok - end - end + X -> ok; + _ -> case catch trace(TraceX, Msg, Fun) of + {'EXIT', R} -> rabbit_log:info("Trace died: ~p~n", [R]); + ok -> ok + end end. trace_exchange(VHost) -> @@ -70,16 +74,11 @@ trace_exchange(VHost) -> {ok, Xs} -> proplists:get_value(VHost, Xs, none) end. -trace(TraceX, Msg0, Mode, Extra) -> +trace(TraceX, Msg0, Fun) -> Msg = ensure_content_decoded(Msg0), - {RKPrefix, RKSuffix} = case Mode of - publish -> {<<"publish">>, xname(Msg0)}; - {deliver, Q} -> {<<"deliver">>, Q} - end, + {RKey, Headers} = Fun(), rabbit_basic:publish(rabbit_misc:r(vhost(Msg), exchange, TraceX), - <>, - #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - payload(Msg)), + RKey, #'P_basic'{headers = Headers}, payload(Msg)), ok. msg_to_table(#basic_message{exchange_name = #resource{name = XName}, -- cgit v1.2.1 From c39f7e65ced88329bbde31411690b600bb439f90 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 16:44:09 +0100 Subject: Further refactoring --- src/rabbit_channel.erl | 5 ++--- src/rabbit_trace.erl | 33 +++++++++++++++------------------ 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index e5e61e56..991b0b06 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -22,9 +22,8 @@ -export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). --export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1, - refresh_config_all/0]). 
--export([emit_stats/1, ready_for_close/1]). +-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). +-export([refresh_config_all/0, emit_stats/1, ready_for_close/1]). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 6f21e575..ed1ac05f 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -41,8 +41,7 @@ init(VHost) -> tap_trace_in(Msg, TraceX) -> maybe_trace(Msg, TraceX, fun () -> - XName = xname(Msg), - {<<"publish.", XName/binary>>, msg_to_table(Msg)} + {<<"publish.", (xname(Msg))/binary>>, msg_to_table(Msg)} end). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, @@ -57,15 +56,20 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. -maybe_trace(Msg, none, Fun) -> ok; -maybe_trace(Msg, TraceX, Fun) -> - X = xname(Msg), - case TraceX of - X -> ok; - _ -> case catch trace(TraceX, Msg, Fun) of - {'EXIT', R} -> rabbit_log:info("Trace died: ~p~n", [R]); - ok -> ok - end +maybe_trace(_Msg, none, _Fun) -> + ok; +maybe_trace(Msg0, TraceX, Fun) -> + case xname(Msg0) of + TraceX -> ok; + _ -> Msg = ensure_content_decoded(Msg0), + X = rabbit_misc:r(vhost(Msg), exchange, TraceX), + {RKey, Headers} = Fun(), + P = #'P_basic'{headers = Headers}, + case catch rabbit_basic:publish(X, RKey, P, payload(Msg)) of + {'EXIT', R} -> rabbit_log:info( + "Trace publish died: ~p~n", [R]); + {ok, _, _} -> ok + end end. trace_exchange(VHost) -> @@ -74,13 +78,6 @@ trace_exchange(VHost) -> {ok, Xs} -> proplists:get_value(VHost, Xs, none) end. -trace(TraceX, Msg0, Fun) -> - Msg = ensure_content_decoded(Msg0), - {RKey, Headers} = Fun(), - rabbit_basic:publish(rabbit_misc:r(vhost(Msg), exchange, TraceX), - RKey, #'P_basic'{headers = Headers}, payload(Msg)), - ok. - msg_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, content = #content{properties = Props}}) -> -- cgit v1.2.1 From 358a68480d6eb0b0c388deb166342d49c56382f3 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 19 May 2011 17:36:24 +0100 Subject: funs are too expensive here. Ouch. --- src/rabbit_trace.erl | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index ed1ac05f..df44ed25 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -39,32 +39,31 @@ init(VHost) -> trace_exchange(VHost). tap_trace_in(Msg, TraceX) -> - maybe_trace(Msg, TraceX, - fun () -> - {<<"publish.", (xname(Msg))/binary>>, msg_to_table(Msg)} - end). + maybe_trace(Msg, TraceX, publish, []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) -> - maybe_trace(Msg, TraceX, - fun () -> - H = {<<"redelivered">>, signedint, - case Redelivered of true -> 1; false -> 0 end}, - {<<"deliver.", QName/binary>>, [H | msg_to_table(Msg)]} - end). + RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, + maybe_trace(Msg, TraceX, {deliver, QName}, + [{<<"redelivered">>, signedint, RedeliveredNum}]). xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. 
-maybe_trace(_Msg, none, _Fun) -> +maybe_trace(_Msg, none, _Mode, _Extra) -> ok; -maybe_trace(Msg0, TraceX, Fun) -> +maybe_trace(Msg0, TraceX, Mode, Extra) -> case xname(Msg0) of TraceX -> ok; _ -> Msg = ensure_content_decoded(Msg0), X = rabbit_misc:r(vhost(Msg), exchange, TraceX), - {RKey, Headers} = Fun(), - P = #'P_basic'{headers = Headers}, + {RKPrefix, RKSuffix} = + case Mode of + publish -> {<<"publish">>, xname(Msg0)}; + {deliver, Q} -> {<<"deliver">>, Q} + end, + RKey = <>, + P = #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, case catch rabbit_basic:publish(X, RKey, P, payload(Msg)) of {'EXIT', R} -> rabbit_log:info( "Trace publish died: ~p~n", [R]); -- cgit v1.2.1 From b52f2ffca60521f21f4dc74d3ec0dccc754e6764 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 18:40:35 +0100 Subject: sometimes clarity trumps performance I can't actually detect a difference in performance here, though I'm sure the new code is slower than the old one. --- src/rabbit_trace.erl | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index df44ed25..36125376 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -39,29 +39,24 @@ init(VHost) -> trace_exchange(VHost). tap_trace_in(Msg, TraceX) -> - maybe_trace(Msg, TraceX, publish, []). + maybe_trace(Msg, TraceX, <<"publish">>, xname(Msg), []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, TraceX, {deliver, QName}, + maybe_trace(Msg, TraceX, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. -maybe_trace(_Msg, none, _Mode, _Extra) -> +maybe_trace(_Msg, none, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(Msg0, TraceX, Mode, Extra) -> +maybe_trace(Msg0, TraceX, RKPrefix, RKSuffix, Extra) -> case xname(Msg0) of TraceX -> ok; _ -> Msg = ensure_content_decoded(Msg0), X = rabbit_misc:r(vhost(Msg), exchange, TraceX), - {RKPrefix, RKSuffix} = - case Mode of - publish -> {<<"publish">>, xname(Msg0)}; - {deliver, Q} -> {<<"deliver">>, Q} - end, RKey = <>, P = #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, case catch rabbit_basic:publish(X, RKey, P, payload(Msg)) of -- cgit v1.2.1 From a93d3d23fb1768ff6b1518052ee7cf12cf4c30f2 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 19:11:31 +0100 Subject: inlining --- src/rabbit_trace.erl | 57 +++++++++++++++++++++++----------------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 36125376..ab6ef982 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -36,7 +36,10 @@ %%---------------------------------------------------------------------------- init(VHost) -> - trace_exchange(VHost). + case application:get_env(rabbit, trace_exchanges) of + undefined -> none; + {ok, Xs} -> proplists:get_value(VHost, Xs, none) + end. tap_trace_in(Msg, TraceX) -> maybe_trace(Msg, TraceX, <<"publish">>, xname(Msg), []). @@ -47,44 +50,39 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, maybe_trace(Msg, TraceX, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). -xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. 
-vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. - maybe_trace(_Msg, none, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(Msg0, TraceX, RKPrefix, RKSuffix, Extra) -> - case xname(Msg0) of +maybe_trace(Msg, TraceX, RKPrefix, RKSuffix, Extra) -> + case xname(Msg) of TraceX -> ok; - _ -> Msg = ensure_content_decoded(Msg0), - X = rabbit_misc:r(vhost(Msg), exchange, TraceX), - RKey = <>, - P = #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - case catch rabbit_basic:publish(X, RKey, P, payload(Msg)) of + _ -> case catch rabbit_basic:publish( + rabbit_misc:r(vhost(Msg), exchange, TraceX), + <>, + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, + payload(Msg)) of {'EXIT', R} -> rabbit_log:info( "Trace publish died: ~p~n", [R]); {ok, _, _} -> ok end end. -trace_exchange(VHost) -> - case application:get_env(rabbit, trace_exchanges) of - undefined -> none; - {ok, Xs} -> proplists:get_value(VHost, Xs, none) - end. +xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. +vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. msg_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, - content = #content{properties = Props}}) -> + content = Content}) -> + #content{properties = Props} = + rabbit_binary_parser:ensure_content_decoded(Content), {PropsTable, _Ix} = - lists:foldl( - fun (K, {L, Ix}) -> - V = element(Ix, Props), - NewL = case V of - undefined -> L; - _ -> [{a2b(K), type(V), V} | L] - end, - {NewL, Ix + 1} - end, {[], 2}, record_info(fields, 'P_basic')), + lists:foldl(fun (K, {L, Ix}) -> + V = element(Ix, Props), + NewL = case V of + undefined -> L; + _ -> [{a2b(K), type(V), V} | L] + end, + {NewL, Ix + 1} + end, {[], 2}, record_info(fields, 'P_basic')), [{<<"exchange_name">>, longstr, XName}, {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]}, {<<"properties">>, table, PropsTable}, @@ -93,12 +91,7 @@ msg_to_table(#basic_message{exchange_name = #resource{name = XName}, payload(#basic_message{content = #content{payload_fragments_rev = PFR}}) -> list_to_binary(lists:reverse(PFR)). -ensure_content_decoded(Msg = #basic_message{content = Content}) -> - Msg#basic_message{content = rabbit_binary_parser:ensure_content_decoded( - Content)}. - -a2b(A) -> - list_to_binary(atom_to_list(A)). +a2b(A) -> list_to_binary(atom_to_list(A)). type(V) when is_list(V) -> table; type(V) when is_integer(V) -> signedint; -- cgit v1.2.1 From c86418efd720f04ceaa2ee3b208688e179039929 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 19:50:38 +0100 Subject: fix error handling and construct xname on init rather than every message I have no idea why that 'catch' was there. There is nothing to go wrong during publish except the exchange may not be found. Which the 'catch' wasn't catching since that is just an ordinary {error, not_found} return. Cue explosion. So now we handle that and print a nice error message. Also, there is no point constructing the exchange name for every message; doing that once, on init, is enough. That has the further advantage that now the implementation matches the spec (rabbit_exchange:name() is a qualified name, not a binary). --- src/rabbit_trace.erl | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index ab6ef982..9543f8de 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -25,7 +25,7 @@ -ifdef(use_specs). 
--type(state() :: rabbit_exchange:name()). +-type(state() :: rabbit_exchange:name() | 'none'). -spec(init/1 :: (rabbit_types:vhost()) -> state()). -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). @@ -38,7 +38,10 @@ init(VHost) -> case application:get_env(rabbit, trace_exchanges) of undefined -> none; - {ok, Xs} -> proplists:get_value(VHost, Xs, none) + {ok, Xs} -> case proplists:get_value(VHost, Xs, none) of + none -> none; + XN -> rabbit_misc:r(VHost, exchange, XN) + end end. tap_trace_in(Msg, TraceX) -> @@ -52,22 +55,21 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, maybe_trace(_Msg, none, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(Msg, TraceX, RKPrefix, RKSuffix, Extra) -> +maybe_trace(Msg, X, RKPrefix, RKSuffix, Extra) -> case xname(Msg) of - TraceX -> ok; - _ -> case catch rabbit_basic:publish( - rabbit_misc:r(vhost(Msg), exchange, TraceX), - <>, - #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - payload(Msg)) of - {'EXIT', R} -> rabbit_log:info( - "Trace publish died: ~p~n", [R]); - {ok, _, _} -> ok - end + X -> ok; + _ -> case rabbit_basic:publish( + X, + <>, + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, + payload(Msg)) of + {ok, _, _} -> ok; + {error, not_found} -> rabbit_log:info("trace ~s not found~n", + [rabbit_misc:rs(X)]) + end end. xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. -vhost(#basic_message{exchange_name = #resource{virtual_host = VHost}}) -> VHost. msg_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, -- cgit v1.2.1 From 51c608f6bc85959559b94eebb3bf02c6e91fd138 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 20:10:27 +0100 Subject: cosmetic --- src/rabbit_trace.erl | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 9543f8de..267b1d40 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -38,35 +38,35 @@ init(VHost) -> case application:get_env(rabbit, trace_exchanges) of undefined -> none; - {ok, Xs} -> case proplists:get_value(VHost, Xs, none) of + {ok, XNs} -> case proplists:get_value(VHost, XNs, none) of none -> none; - XN -> rabbit_misc:r(VHost, exchange, XN) + Name -> rabbit_misc:r(VHost, exchange, Name) end end. -tap_trace_in(Msg, TraceX) -> - maybe_trace(Msg, TraceX, <<"publish">>, xname(Msg), []). +tap_trace_in(Msg, TraceXN) -> + maybe_trace(Msg, TraceXN, <<"publish">>, xname(Msg), []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, - TraceX) -> + TraceXN) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, TraceX, <<"deliver">>, QName, + maybe_trace(Msg, TraceXN, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). maybe_trace(_Msg, none, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(Msg, X, RKPrefix, RKSuffix, Extra) -> +maybe_trace(Msg, XN, RKPrefix, RKSuffix, Extra) -> case xname(Msg) of - X -> ok; - _ -> case rabbit_basic:publish( - X, - <>, - #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - payload(Msg)) of - {ok, _, _} -> ok; - {error, not_found} -> rabbit_log:info("trace ~s not found~n", - [rabbit_misc:rs(X)]) - end + XN -> ok; + _ -> case rabbit_basic:publish( + XN, + <>, + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, + payload(Msg)) of + {ok, _, _} -> ok; + {error, not_found} -> rabbit_log:info("trace ~s not found~n", + [rabbit_misc:rs(XN)]) + end end. 
xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. -- cgit v1.2.1 From bcd37ba7a6b103cb37686dc65e945a17add6b123 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 19 May 2011 20:59:30 +0100 Subject: some more inlining --- src/rabbit_trace.erl | 39 ++++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 267b1d40..09f2545d 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -44,33 +44,33 @@ init(VHost) -> end end. -tap_trace_in(Msg, TraceXN) -> - maybe_trace(Msg, TraceXN, <<"publish">>, xname(Msg), []). +tap_trace_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, + TraceXN) -> + maybe_trace(TraceXN, Msg, <<"publish">>, XName, []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceXN) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(Msg, TraceXN, <<"deliver">>, QName, + maybe_trace(TraceXN, Msg, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). -maybe_trace(_Msg, none, _RKPrefix, _RKSuffix, _Extra) -> +maybe_trace(none, _Msg, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(Msg, XN, RKPrefix, RKSuffix, Extra) -> - case xname(Msg) of - XN -> ok; - _ -> case rabbit_basic:publish( - XN, - <>, - #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - payload(Msg)) of - {ok, _, _} -> ok; - {error, not_found} -> rabbit_log:info("trace ~s not found~n", - [rabbit_misc:rs(XN)]) - end +maybe_trace(XName, #basic_message{exchange_name = #resource{name = XName}}, + _RKPrefix, _RKSuffix, _Extra) -> + ok; +maybe_trace(XName, Msg = #basic_message{content = #content{ + payload_fragments_rev = PFR}}, + RKPrefix, RKSuffix, Extra) -> + case rabbit_basic:publish(XName, + <>, + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, + list_to_binary(lists:reverse(PFR))) of + {ok, _, _} -> ok; + {error, not_found} -> rabbit_log:info("trace ~s not found~n", + [rabbit_misc:rs(XName)]) end. -xname(#basic_message{exchange_name = #resource{name = XName}}) -> XName. - msg_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, content = Content}) -> @@ -90,9 +90,6 @@ msg_to_table(#basic_message{exchange_name = #resource{name = XName}, {<<"properties">>, table, PropsTable}, {<<"node">>, longstr, a2b(node())}]. -payload(#basic_message{content = #content{payload_fragments_rev = PFR}}) -> - list_to_binary(lists:reverse(PFR)). - a2b(A) -> list_to_binary(atom_to_list(A)). 
type(V) when is_list(V) -> table; -- cgit v1.2.1 From a588eacca187df9f0e97aff6cce91423f9bc8539 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 20 May 2011 12:01:08 +0100 Subject: Make log messages prettier --- src/rabbit_mirror_queue_coordinator.erl | 7 ++++--- src/rabbit_mirror_queue_slave.erl | 12 +++++++----- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 729749dc..8ddda1cd 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -256,9 +256,10 @@ handle_call(get_gm, _From, State = #state { gm = GM }) -> handle_cast({gm_deaths, Deaths}, State = #state { q = #amqqueue { name = QueueName } }) -> - rabbit_log:info("Master ~p saw deaths ~p for ~s~n", - [self(), [{Pid, node(Pid)} || Pid <- Deaths], - rabbit_misc:rs(QueueName)]), + rabbit_log:info("Mirrored-queue (~s): Master ~s saw deaths of mirrors ~s~n", + [rabbit_misc:rs(QueueName), + rabbit_misc:pid_to_string(self()), + [[rabbit_misc:pid_to_string(Pid), $ ] || Pid <- Deaths]]), case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of {ok, Pid} when node(Pid) =:= node() -> noreply(State); diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index da01e076..fc50c932 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -143,9 +143,10 @@ handle_call({gm_deaths, Deaths}, From, State = #state { q = #amqqueue { name = QueueName }, gm = GM, master_node = MNode }) -> - rabbit_log:info("Slave ~p saw deaths ~p for ~s~n", - [self(), [{Pid, node(Pid)} || Pid <- Deaths], - rabbit_misc:rs(QueueName)]), + rabbit_log:info("Mirrored-queue (~s): Slave ~s saw deaths of mirrors ~s~n", + [rabbit_misc:rs(QueueName), + rabbit_misc:pid_to_string(self()), + [[rabbit_misc:pid_to_string(Pid), $ ] || Pid <- Deaths]]), %% The GM has told us about deaths, which means we're not going to %% receive any more messages from GM case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of @@ -392,8 +393,9 @@ promote_me(From, #state { q = Q, sender_queues = SQ, msg_id_ack = MA, msg_id_status = MS }) -> - rabbit_log:info("Promoting slave ~p for ~s~n", - [self(), rabbit_misc:rs(Q #amqqueue.name)]), + rabbit_log:info("Mirrored-queue (~s): Promoting slave ~s to master~n", + [rabbit_misc:rs(Q #amqqueue.name), + rabbit_misc:pid_to_string(self())]), {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), -- cgit v1.2.1 From a749a8f513b881ff35f1de0e225b58aa6575460f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 20 May 2011 14:32:16 +0100 Subject: BQ:idle_timeout => BQ:timeout --- src/rabbit_mirror_queue_master.erl | 10 +++++----- src/rabbit_mirror_queue_slave.erl | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index f54c8c37..e973ea78 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -21,7 +21,7 @@ tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, - needs_idle_timeout/1, idle_timeout/1, handle_pre_hibernate/1, + needs_timeout/1, timeout/1, handle_pre_hibernate/1, status/1, invoke/3, is_duplicate/3, discard/3]). -export([start/1, stop/0]). 
@@ -252,11 +252,11 @@ ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> {Result, BQS1} = BQ:ram_duration(BQS), {Result, State #state { backing_queue_state = BQS1 }}. -needs_idle_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> - BQ:needs_idle_timeout(BQS). +needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> + BQ:needs_timeout(BQS). -idle_timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> - State #state { backing_queue_state = BQ:idle_timeout(BQS) }. +timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> + State #state { backing_queue_state = BQ:timeout(BQS) }. handle_pre_hibernate(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index fc50c932..46020271 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -205,7 +205,7 @@ handle_cast(update_ram_duration, backing_queue_state = BQS2 }); handle_cast(sync_timeout, State) -> - noreply(backing_queue_idle_timeout( + noreply(backing_queue_timeout( State #state { sync_timer_ref = undefined })); handle_cast({rollback, _Txn, _ChPid}, State) -> @@ -213,7 +213,7 @@ handle_cast({rollback, _Txn, _ChPid}, State) -> noreply(State). handle_info(timeout, State) -> - noreply(backing_queue_idle_timeout(State)); + noreply(backing_queue_timeout(State)); handle_info({'DOWN', _MonitorRef, process, Pid, _Reason}, State = #state { gm = GM }) -> @@ -499,13 +499,13 @@ next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) -> State1 = ensure_rate_timer( confirm_messages(MsgIds, State #state { backing_queue_state = BQS1 })), - case BQ:needs_idle_timeout(BQS1) of + case BQ:needs_timeout(BQS1) of true -> {ensure_sync_timer(State1), 0}; false -> {stop_sync_timer(State1), hibernate} end. -backing_queue_idle_timeout(State = #state { backing_queue = BQ }) -> - run_backing_queue(BQ, fun (M, BQS) -> M:idle_timeout(BQS) end, State). +backing_queue_timeout(State = #state { backing_queue = BQ }) -> + run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State). ensure_sync_timer(State = #state { sync_timer_ref = undefined }) -> {ok, TRef} = timer:apply_after( -- cgit v1.2.1 From 790dbcf244cd457692f084f99edbed64fa2cde15 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 20 May 2011 14:34:55 +0100 Subject: ...and the other bits I forgot. --- src/rabbit_mirror_queue_slave.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 46020271..63a43197 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -500,8 +500,9 @@ next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) -> confirm_messages(MsgIds, State #state { backing_queue_state = BQS1 })), case BQ:needs_timeout(BQS1) of - true -> {ensure_sync_timer(State1), 0}; - false -> {stop_sync_timer(State1), hibernate} + false -> {stop_sync_timer(State1), hibernate}; + idle -> {stop_sync_timer(State1), 0 }; + timed -> {ensure_sync_timer(State1), 0 } end. backing_queue_timeout(State = #state { backing_queue = BQ }) -> -- cgit v1.2.1 From 48839e8428559495ec7d9b43068a5caef3ca4418 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 20 May 2011 14:51:57 +0100 Subject: Rewrite the rabbitmqctl interface. Define trace_exchanges in rabbit.app. 
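An illustrative session with the two commands this patch introduces, assuming a vhost "/" and an exchange named "tracer" already declared in it (example names; if the exchange cannot be looked up, the channel's trace state simply falls back to none):

    rabbitmqctl start_tracing -p / tracer
    rabbitmqctl stop_tracing -p /

While tracing is on, a copy of each message is published to that exchange with the original body as payload and a routing key of the form publish.<source-exchange> for messages entering an exchange or deliver.<queue> for messages delivered from a queue, carrying headers shaped roughly as follows (sketch of msg_to_table/1's output; deliver.* copies also get a {<<"redelivered">>, signedint, 0|1} entry):

    [{<<"exchange_name">>, longstr, ExchangeName},
     {<<"routing_keys">>,  array,   [{longstr, RK} || RK <- RoutingKeys]},
     {<<"properties">>,    table,   PropsTable},
     {<<"node">>,          longstr, NodeName}]
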
--- docs/rabbitmqctl.1.xml | 41 ++++++++++------------------------------- ebin/rabbit_app.in | 1 + src/rabbit.erl | 16 ---------------- src/rabbit_control.erl | 24 ++++++++---------------- src/rabbit_trace.erl | 34 +++++++++++++++++++++++++++------- 5 files changed, 46 insertions(+), 70 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 62869158..0e212f10 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1266,59 +1266,38 @@ - Configuration variables - - Some configuration values can be changed at run time. Note - that this does not apply to all variables; many are only read - at startup - changing them will have no effect. - + Message Tracing - set_env variable value + start_tracing -p vhost exchange - variable - The name of the variable to set, as the string form of an Erlang term. + vhost + The name of the virtual host for which to start tracing. value - The value to set it to, as the string form of an Erlang term. + The name of the exchange to which trace messages should be published. - Set the value of a configuration variable. + Starts tracing. - get_env variable + stop_tracing -p vhost - variable - The name of the variable to get, as the string form of an Erlang term. - - - - Get the value of a configuration variable, printing either - {ok,Value} or undefined. - - - - - - unset_env variable - - - - variable - The name of the variable to clear, as the string form of an Erlang term. + vhost + The name of the virtual host for which to stop tracing. - Clear the value of a configuration variable. + Stops tracing. diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 014c18b0..7972ba86 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -36,6 +36,7 @@ {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, + {trace_exchanges, []}, {tcp_listen_options, [binary, {packet, raw}, {reuseaddr, true}, diff --git a/src/rabbit.erl b/src/rabbit.erl index 9052f2f9..e6e80b4a 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -25,8 +25,6 @@ -export([log_location/1]). --export([get_env/1, set_env/2, unset_env/1]). - %%--------------------------------------------------------------------------- %% Boot steps. -export([maybe_insert_default_data/0, boot_delegate/0, recover/0]). @@ -188,9 +186,6 @@ -spec(maybe_insert_default_data/0 :: () -> 'ok'). -spec(boot_delegate/0 :: () -> 'ok'). -spec(recover/0 :: () -> 'ok'). --spec(get_env/1 :: (atom()) -> term()). --spec(set_env/2 :: (atom(), term()) -> 'ok'). --spec(unset_env/1 :: (atom()) -> 'ok'). -endif. @@ -519,14 +514,3 @@ log_rotation_result(ok, {error, SaslLogError}) -> {error, {cannot_rotate_sasl_logs, SaslLogError}}; log_rotation_result(ok, ok) -> ok. - -get_env(Key) -> - application:get_env(rabbit, Key). - -set_env(Key, Value) -> - application:set_env(rabbit, Key, Value), - rabbit_channel:refresh_config_all(). - -unset_env(Key) -> - application:unset_env(rabbit, Key), - rabbit_channel:refresh_config_all(). 
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 5a013711..2a9dd463 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -282,18 +282,15 @@ action(list_consumers, Node, _Args, Opts, Inform) -> Other -> Other end; -action(set_env, Node, [Var, Term], _Opts, Inform) -> - Inform("Setting control variable ~s for node ~p to ~s", [Var, Node, Term]), - rpc_call(Node, rabbit, set_env, [parse(Var), parse(Term)]); +action(start_tracing, Node, [XName], Opts, Inform) -> + VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + Inform("Starting tracing", []), + rpc_call(Node, rabbit_trace, start, [VHost, XName]); -action(get_env, Node, [Var], _Opts, Inform) -> - Inform("Getting control variable ~s for node ~p", [Var, Node]), - Val = rpc_call(Node, rabbit, get_env, [parse(Var)]), - io:format("~p~n", [Val]); - -action(unset_env, Node, [Var], _Opts, Inform) -> - Inform("Clearing control variable ~s for node ~p", [Var, Node]), - rpc_call(Node, rabbit, unset_env, [parse(Var)]); +action(stop_tracing, Node, [], Opts, Inform) -> + VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + Inform("Stopping tracing", []), + rpc_call(Node, rabbit_trace, stop, [VHost]); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), @@ -338,11 +335,6 @@ default_if_empty(List, Default) when is_list(List) -> true -> [list_to_atom(X) || X <- List] end. -parse(Str) -> - {ok, Tokens, _} = erl_scan:string(Str ++ "."), - {ok, Term} = erl_parse:parse_term(Tokens), - Term. - display_info_list(Results, InfoItemKeys) when is_list(Results) -> lists:foreach( fun (Result) -> display_row( diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 09f2545d..6228c54e 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,11 +16,13 @@ -module(rabbit_trace). --export([init/1, tap_trace_in/2, tap_trace_out/2]). +-export([init/1, tap_trace_in/2, tap_trace_out/2, start/2, stop/1]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). +-define(TRACE_EXCHANGES, trace_exchanges). + %%---------------------------------------------------------------------------- -ifdef(use_specs). @@ -31,17 +33,18 @@ -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). -spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok'). +-spec(start/2 :: (rabbit_types:vhost(), binary()) -> 'ok'). +-spec(stop/1 :: (rabbit_types:vhost()) -> 'ok'). + -endif. %%---------------------------------------------------------------------------- init(VHost) -> - case application:get_env(rabbit, trace_exchanges) of - undefined -> none; - {ok, XNs} -> case proplists:get_value(VHost, XNs, none) of - none -> none; - Name -> rabbit_misc:r(VHost, exchange, Name) - end + {ok, XNs} = application:get_env(rabbit, ?TRACE_EXCHANGES), + case proplists:get_value(VHost, XNs, none) of + none -> none; + Name -> rabbit_misc:r(VHost, exchange, Name) end. tap_trace_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, @@ -54,6 +57,23 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, maybe_trace(TraceXN, Msg, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). +%%---------------------------------------------------------------------------- + +start(VHost, XN) -> + update_config(fun (Xs) -> orddict:store(VHost, list_to_binary(XN), Xs) end). + +stop(VHost) -> + update_config(fun (Xs) -> orddict:erase(VHost, Xs) end). 
+ +update_config(Fun) -> + {ok, Xs0} = application:get_env(rabbit, ?TRACE_EXCHANGES), + Xs = Fun(orddict:from_list(Xs0)), + application:set_env(rabbit, ?TRACE_EXCHANGES, Xs), + rabbit_channel:refresh_config_all(), + ok. + +%%---------------------------------------------------------------------------- + maybe_trace(none, _Msg, _RKPrefix, _RKSuffix, _Extra) -> ok; maybe_trace(XName, #basic_message{exchange_name = #resource{name = XName}}, -- cgit v1.2.1 From 8e01c9b20223d9ae91212bd6fe9e067153d0c124 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 20 May 2011 16:17:16 +0100 Subject: Optimise looking up trace exchange and message construction. --- src/rabbit_basic.erl | 30 +++++++++++++++++++++++++++--- src/rabbit_trace.erl | 32 ++++++++++++++++---------------- 2 files changed, 43 insertions(+), 19 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 3cf73e80..355e390e 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -19,7 +19,7 @@ -include("rabbit_framing.hrl"). -export([publish/1, message/3, message/4, properties/1, delivery/5]). --export([publish/4, publish/7]). +-export([publish/4, publish/7, republish/4, republish/7]). -export([build_content/2, from_content/1]). %%---------------------------------------------------------------------------- @@ -54,6 +54,13 @@ (rabbit_exchange:name(), rabbit_router:routing_key(), boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), properties_input(), binary()) -> publish_result()). +-spec(republish/4 :: + (rabbit_types:exchange(), rabbit_router:routing_key(), + properties_input(), [binary()]) -> publish_result()). +-spec(republish/7 :: + (rabbit_types:exchange(), rabbit_router:routing_key(), + boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), + properties_input(), [binary()]) -> publish_result()). -spec(build_content/2 :: (rabbit_framing:amqp_property_record(), binary()) -> rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> @@ -77,7 +84,10 @@ delivery(Mandatory, Immediate, Txn, Message, MsgSeqNo) -> #delivery{mandatory = Mandatory, immediate = Immediate, txn = Txn, sender = self(), message = Message, msg_seq_no = MsgSeqNo}. -build_content(Properties, BodyBin) -> +build_content(Properties, BodyBin) when is_binary(BodyBin) -> + build_content(Properties, [BodyBin]); + +build_content(Properties, PFR) -> %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1 {ClassId, _MethodId} = rabbit_framing_amqp_0_9_1:method_id('basic.publish'), @@ -85,7 +95,7 @@ build_content(Properties, BodyBin) -> properties = Properties, properties_bin = none, protocol = none, - payload_fragments_rev = [BodyBin]}. + payload_fragments_rev = PFR}. from_content(Content) -> #content{class_id = ClassId, @@ -166,6 +176,20 @@ publish(ExchangeName, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, properties(Properties), BodyBin), undefined)). +%% It's faster if you already have an exchange and a message not to +%% look up the exchange and disassemble and reassemble fragments +republish(X, RoutingKey, Props, PFR) -> + republish(X, RoutingKey, false, false, none, Props, PFR). 
+ +%% It's faster if you already have an exchange and a message not to +%% look up the exchange and disassemble and reassemble fragments +republish(X = #exchange{name = XName}, + RoutingKey, Mandatory, Immediate, Txn, Props, PFR) -> + {ok, Msg} = message(XName, RoutingKey, build_content(Props, PFR)), + Delivery = delivery(Mandatory, Immediate, Txn, Msg, undefined), + {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), + {ok, RoutingRes, DeliveredQPids}. + is_message_persistent(#content{properties = #'P_basic'{ delivery_mode = Mode}}) -> case Mode of diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 6228c54e..e0681f15 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -44,17 +44,21 @@ init(VHost) -> {ok, XNs} = application:get_env(rabbit, ?TRACE_EXCHANGES), case proplists:get_value(VHost, XNs, none) of none -> none; - Name -> rabbit_misc:r(VHost, exchange, Name) + Name -> case rabbit_exchange:lookup( + rabbit_misc:r(VHost, exchange, Name)) of + {ok, X} -> X; + _ -> none + end end. tap_trace_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, - TraceXN) -> - maybe_trace(TraceXN, Msg, <<"publish">>, XName, []). + TraceX) -> + maybe_trace(TraceX, Msg, <<"publish">>, XName, []). tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, - TraceXN) -> + TraceX) -> RedeliveredNum = case Redelivered of true -> 1; false -> 0 end, - maybe_trace(TraceXN, Msg, <<"deliver">>, QName, + maybe_trace(TraceX, Msg, <<"deliver">>, QName, [{<<"redelivered">>, signedint, RedeliveredNum}]). %%---------------------------------------------------------------------------- @@ -76,20 +80,16 @@ update_config(Fun) -> maybe_trace(none, _Msg, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(XName, #basic_message{exchange_name = #resource{name = XName}}, +maybe_trace(#exchange{name = Name}, #basic_message{exchange_name = Name}, _RKPrefix, _RKSuffix, _Extra) -> ok; -maybe_trace(XName, Msg = #basic_message{content = #content{ - payload_fragments_rev = PFR}}, +maybe_trace(X, Msg = #basic_message{content = #content{ + payload_fragments_rev = PFR}}, RKPrefix, RKSuffix, Extra) -> - case rabbit_basic:publish(XName, - <>, - #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, - list_to_binary(lists:reverse(PFR))) of - {ok, _, _} -> ok; - {error, not_found} -> rabbit_log:info("trace ~s not found~n", - [rabbit_misc:rs(XName)]) - end. + {ok, _, _} = rabbit_basic:republish( + X, <>, + #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR), + ok. msg_to_table(#basic_message{exchange_name = #resource{name = XName}, routing_keys = RoutingKeys, -- cgit v1.2.1 From 6087e31872aa8a03283ceef3bc8e03774bf54200 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 20 May 2011 17:46:44 +0100 Subject: Fold publish and republish together. --- src/rabbit_basic.erl | 81 ++++++++++++++++++++++++---------------------------- src/rabbit_trace.erl | 4 +-- 2 files changed, 40 insertions(+), 45 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 355e390e..91bdf826 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -19,7 +19,7 @@ -include("rabbit_framing.hrl"). -export([publish/1, message/3, message/4, properties/1, delivery/5]). --export([publish/4, publish/7, republish/4, republish/7]). +-export([publish/4, publish/7]). -export([build_content/2, from_content/1]). %%---------------------------------------------------------------------------- @@ -48,21 +48,16 @@ -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). 
-spec(publish/4 :: - (rabbit_exchange:name(), rabbit_router:routing_key(), - properties_input(), binary()) -> publish_result()). + (rabbit_types:exchange() | rabbit_exchange:name(), + rabbit_router:routing_key(), properties_input(), + binary() | [binary()]) -> publish_result()). -spec(publish/7 :: - (rabbit_exchange:name(), rabbit_router:routing_key(), - boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), - properties_input(), binary()) -> publish_result()). --spec(republish/4 :: - (rabbit_types:exchange(), rabbit_router:routing_key(), - properties_input(), [binary()]) -> publish_result()). --spec(republish/7 :: - (rabbit_types:exchange(), rabbit_router:routing_key(), - boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), - properties_input(), [binary()]) -> publish_result()). --spec(build_content/2 :: (rabbit_framing:amqp_property_record(), binary()) -> - rabbit_types:content()). + (rabbit_types:exchange() | rabbit_exchange:name(), + rabbit_router:routing_key(), boolean(), boolean(), + rabbit_types:maybe(rabbit_types:txn()), properties_input(), + binary() | [binary()]) -> publish_result()). +-spec(build_content/2 :: (rabbit_framing:amqp_property_record(), + binary() | [binary()]) -> rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> {rabbit_framing:amqp_property_record(), binary()}). @@ -73,13 +68,14 @@ publish(Delivery = #delivery{ message = #basic_message{exchange_name = ExchangeName}}) -> case rabbit_exchange:lookup(ExchangeName) of - {ok, X} -> - {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), - {ok, RoutingRes, DeliveredQPids}; - Other -> - Other + {ok, X} -> publish(X, Delivery); + Other -> Other end. +publish(X, Delivery) -> + {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), + {ok, RoutingRes, DeliveredQPids}. + delivery(Mandatory, Immediate, Txn, Message, MsgSeqNo) -> #delivery{mandatory = Mandatory, immediate = Immediate, txn = Txn, sender = self(), message = Message, msg_seq_no = MsgSeqNo}. @@ -136,9 +132,9 @@ message(ExchangeName, RoutingKey, {error, _Reason} = Error -> Error end. -message(ExchangeName, RoutingKey, RawProperties, BodyBin) -> +message(ExchangeName, RoutingKey, RawProperties, Body) -> Properties = properties(RawProperties), - Content = build_content(Properties, BodyBin), + Content = build_content(Properties, Body), {ok, Msg} = message(ExchangeName, RoutingKey, Content), Msg. @@ -163,32 +159,31 @@ indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(ExchangeName, RoutingKeyBin, Properties, BodyBin) -> - publish(ExchangeName, RoutingKeyBin, false, false, none, Properties, +publish(Exchange, RoutingKeyBin, Properties, BodyBin) -> + publish(Exchange, RoutingKeyBin, false, false, none, Properties, BodyBin). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(ExchangeName, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, +publish(Exchange, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, BodyBin) -> - publish(delivery(Mandatory, Immediate, Txn, - message(ExchangeName, RoutingKeyBin, - properties(Properties), BodyBin), - undefined)). - -%% It's faster if you already have an exchange and a message not to -%% look up the exchange and disassemble and reassemble fragments -republish(X, RoutingKey, Props, PFR) -> - republish(X, RoutingKey, false, false, none, Props, PFR). 
- -%% It's faster if you already have an exchange and a message not to -%% look up the exchange and disassemble and reassemble fragments -republish(X = #exchange{name = XName}, - RoutingKey, Mandatory, Immediate, Txn, Props, PFR) -> - {ok, Msg} = message(XName, RoutingKey, build_content(Props, PFR)), - Delivery = delivery(Mandatory, Immediate, Txn, Msg, undefined), - {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), - {ok, RoutingRes, DeliveredQPids}. + case exchange(Exchange) of + X = #exchange{} -> + publish(delivery(Mandatory, Immediate, Txn, + message(X#exchange.name, RoutingKeyBin, + properties(Properties), BodyBin), + undefined)); + _ -> + {ok, unroutable, []} + end. + +exchange(X = #exchange{}) -> + X; +exchange(N = #resource{kind = exchange}) -> + case rabbit_exchange:lookup(N) of + {ok, X} -> X; + Err -> Err + end. is_message_persistent(#content{properties = #'P_basic'{ delivery_mode = Mode}}) -> diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index e0681f15..cf8ee64c 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -27,7 +27,7 @@ -ifdef(use_specs). --type(state() :: rabbit_exchange:name() | 'none'). +-type(state() :: rabbit_types:exchange() | 'none'). -spec(init/1 :: (rabbit_types:vhost()) -> state()). -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). @@ -86,7 +86,7 @@ maybe_trace(#exchange{name = Name}, #basic_message{exchange_name = Name}, maybe_trace(X, Msg = #basic_message{content = #content{ payload_fragments_rev = PFR}}, RKPrefix, RKSuffix, Extra) -> - {ok, _, _} = rabbit_basic:republish( + {ok, _, _} = rabbit_basic:publish( X, <>, #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR), ok. -- cgit v1.2.1 From bdc6a0cc4f499ed9d63fa66155c367faf2ea2732 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 20 May 2011 18:15:24 +0100 Subject: Switch to hard coding the exchange name. 
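Illustrative sketch of the resulting configuration shape (the exchange
names on the "before" line are made-up examples): the per-vhost mapping
from vhost to trace exchange name becomes a plain list of traced
vhosts, with the exchange fixed per vhost as the topic exchange
amq.rabbitmq.trace:

    %% before: {trace_exchanges, [{<<"/">>, <<"tap-x">>}, {<<"dev">>, <<"tap-y">>}]}
    %% after:  {trace_vhosts,    [<<"/">>, <<"dev">>]}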
--- ebin/rabbit_app.in | 2 +- src/rabbit_control.erl | 10 +++++----- src/rabbit_trace.erl | 33 ++++++++++++++++----------------- src/rabbit_upgrade_functions.erl | 21 +++++++++++++++++++++ src/rabbit_vhost.erl | 13 +++++++------ 5 files changed, 50 insertions(+), 29 deletions(-) diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 7972ba86..7dabb8c3 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -36,7 +36,7 @@ {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, - {trace_exchanges, []}, + {trace_vhosts, []}, {tcp_listen_options, [binary, {packet, raw}, {reuseaddr, true}, diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 2a9dd463..52cfac9b 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -282,14 +282,14 @@ action(list_consumers, Node, _Args, Opts, Inform) -> Other -> Other end; -action(start_tracing, Node, [XName], Opts, Inform) -> +action(trace_on, Node, [], Opts, Inform) -> VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - Inform("Starting tracing", []), - rpc_call(Node, rabbit_trace, start, [VHost, XName]); + Inform("Starting tracing for vhost ~p", [VHost]), + rpc_call(Node, rabbit_trace, start, [VHost]); -action(stop_tracing, Node, [], Opts, Inform) -> +action(trace_off, Node, [], Opts, Inform) -> VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - Inform("Stopping tracing", []), + Inform("Stopping tracing for vhost ~p", [VHost]), rpc_call(Node, rabbit_trace, stop, [VHost]); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index cf8ee64c..1f9fe932 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,12 +16,13 @@ -module(rabbit_trace). --export([init/1, tap_trace_in/2, tap_trace_out/2, start/2, stop/1]). +-export([init/1, tap_trace_in/2, tap_trace_out/2, start/1, stop/1]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --define(TRACE_EXCHANGES, trace_exchanges). +-define(TRACE_VHOSTS, trace_vhosts). +-define(XNAME, <<"amq.rabbitmq.trace">>). %%---------------------------------------------------------------------------- @@ -33,7 +34,7 @@ -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). -spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok'). --spec(start/2 :: (rabbit_types:vhost(), binary()) -> 'ok'). +-spec(start/1 :: (rabbit_types:vhost()) -> 'ok'). -spec(stop/1 :: (rabbit_types:vhost()) -> 'ok'). -endif. @@ -41,14 +42,12 @@ %%---------------------------------------------------------------------------- init(VHost) -> - {ok, XNs} = application:get_env(rabbit, ?TRACE_EXCHANGES), - case proplists:get_value(VHost, XNs, none) of - none -> none; - Name -> case rabbit_exchange:lookup( - rabbit_misc:r(VHost, exchange, Name)) of - {ok, X} -> X; - _ -> none - end + {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS), + case lists:member(VHost, VHosts) of + false -> none; + true -> {ok, X} = rabbit_exchange:lookup( + rabbit_misc:r(VHost, exchange, ?XNAME)), + X end. tap_trace_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, @@ -63,16 +62,16 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, %%---------------------------------------------------------------------------- -start(VHost, XN) -> - update_config(fun (Xs) -> orddict:store(VHost, list_to_binary(XN), Xs) end). +start(VHost) -> + update_config(fun (VHosts) -> [VHost | lists:delete(VHost, VHosts)] end). 
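%% Illustrative note, not part of this change: init/1 above now returns
%% the actual #exchange{} record for <<"amq.rabbitmq.trace">> when the
%% vhost appears in trace_vhosts, and 'none' otherwise.  Callers keep
%% that result around, which is presumably why update_config/1 below
%% ends with rabbit_channel:refresh_config_all/0, so channels holding a
%% cached trace state pick up trace_on / trace_off.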
stop(VHost) -> - update_config(fun (Xs) -> orddict:erase(VHost, Xs) end). + update_config(fun (VHosts) -> lists:delete(VHost, VHosts) end). update_config(Fun) -> - {ok, Xs0} = application:get_env(rabbit, ?TRACE_EXCHANGES), - Xs = Fun(orddict:from_list(Xs0)), - application:set_env(rabbit, ?TRACE_EXCHANGES, Xs), + {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS), + VHosts = Fun(VHosts0), + application:set_env(rabbit, ?TRACE_VHOSTS, VHosts), rabbit_channel:refresh_config_all(), ok. diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 31bbb929..bead388d 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -28,6 +28,7 @@ -rabbit_upgrade({topic_trie, mnesia, []}). -rabbit_upgrade({semi_durable_route, mnesia, []}). -rabbit_upgrade({exchange_event_serial, mnesia, []}). +-rabbit_upgrade({trace_exchanges, mnesia, []}). %% ------------------------------------------------------------------- @@ -41,6 +42,7 @@ -spec(topic_trie/0 :: () -> 'ok'). -spec(exchange_event_serial/0 :: () -> 'ok'). -spec(semi_durable_route/0 :: () -> 'ok'). +-spec(trace_exchanges/0 :: () -> 'ok'). -endif. @@ -113,6 +115,12 @@ exchange_event_serial() -> create(rabbit_exchange_serial, [{record_name, exchange_serial}, {attributes, [name, next]}]). +trace_exchanges() -> + [declare_exchange( + rabbit_misc:r(VHost, exchange, <<"amq.rabbitmq.trace">>), topic) || + VHost <- rabbit_vhost:list()], + ok. + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> @@ -129,3 +137,16 @@ transform(TableName, Fun, FieldList, NewRecordName) -> create(Tab, TabDef) -> {atomic, ok} = mnesia:create_table(Tab, TabDef), ok. + +%% Dumb replacement for rabbit_exchange:declare that does not require +%% the exchange type registry or worker pool to be running by dint of +%% not validating anything and assuming the exchange type does not +%% require serialisation. +declare_exchange(XName, Type) -> + X = #exchange{name = XName, + type = Type, + durable = true, + auto_delete = false, + internal = false, + arguments = []}, + ok = mnesia:dirty_write(rabbit_durable_exchange, X). diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index 24c130ed..67c73cf2 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -51,12 +51,13 @@ add(VHostPath) -> rabbit_misc:r(VHostPath, exchange, Name), Type, true, false, false, []) || {Name,Type} <- - [{<<"">>, direct}, - {<<"amq.direct">>, direct}, - {<<"amq.topic">>, topic}, - {<<"amq.match">>, headers}, %% per 0-9-1 pdf - {<<"amq.headers">>, headers}, %% per 0-9-1 xml - {<<"amq.fanout">>, fanout}]], + [{<<"">>, direct}, + {<<"amq.direct">>, direct}, + {<<"amq.topic">>, topic}, + {<<"amq.match">>, headers}, %% per 0-9-1 pdf + {<<"amq.headers">>, headers}, %% per 0-9-1 xml + {<<"amq.fanout">>, fanout}, + {<<"amq.rabbitmq.trace">>, topic}]], ok end), rabbit_log:info("Added vhost ~p~n", [VHostPath]), -- cgit v1.2.1 From 77b47e488406bb4c65f5c4b0773b209268038824 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 20 May 2011 18:27:35 +0100 Subject: That's an awful lot of work to solve a potential memory leak... 
--- src/rabbit_amqqueue.erl | 19 +++- src/rabbit_amqqueue_process.erl | 46 ++++---- src/rabbit_mirror_queue_coordinator.erl | 47 +++++++-- src/rabbit_mirror_queue_master.erl | 53 ++++++++-- src/rabbit_mirror_queue_slave.erl | 179 +++++++++++++++++++++++--------- 5 files changed, 257 insertions(+), 87 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 8c374ef3..0550f13b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -33,6 +33,7 @@ %% internal -export([internal_declare/2, internal_delete/1, run_backing_queue/3, run_backing_queue_async/3, + run_backing_queue/4, run_backing_queue_async/4, sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1, emit_stats/1]). @@ -149,6 +150,14 @@ -spec(run_backing_queue_async/3 :: (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). +-spec(run_backing_queue/4 :: + (pid(), atom(), + (fun ((atom(), A) -> {[rabbit_types:msg_id()], A})), + integer() | 'default') -> 'ok'). +-spec(run_backing_queue_async/4 :: + (pid(), atom(), + (fun ((atom(), A) -> {[rabbit_types:msg_id()], A})), + integer() | 'default') -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). @@ -448,10 +457,16 @@ internal_delete(QueueName) -> end). run_backing_queue(QPid, Mod, Fun) -> - gen_server2:call(QPid, {run_backing_queue, Mod, Fun}, infinity). + run_backing_queue(QPid, Mod, Fun, default). run_backing_queue_async(QPid, Mod, Fun) -> - gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). + run_backing_queue_async(QPid, Mod, Fun, default). + +run_backing_queue(QPid, Mod, Fun, Priority) -> + gen_server2:call(QPid, {run_backing_queue, Mod, Fun, Priority}, infinity). + +run_backing_queue_async(QPid, Mod, Fun, Priority) -> + gen_server2:cast(QPid, {run_backing_queue, Mod, Fun, Priority}). sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index d654f372..7daf869b 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -127,7 +127,7 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, State = requeue_and_run( AckTags, process_args( - #q{q = Q#amqqueue{pid = self()}, + #q{q = Q, exclusive_consumer = none, has_had_consumers = false, backing_queue = BQ, @@ -843,29 +843,31 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {run_backing_queue, _Mod, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Mod, _Fun, default} -> 6; + {run_backing_queue, _Mod, _Fun, Priority} -> Priority; + _ -> 0 end. 
prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - delete_immediately -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; - {ack, _Txn, _AckTags, _ChPid} -> 7; - {reject, _AckTags, _Requeue, _ChPid} -> 7; - {notify_sent, _ChPid} -> 7; - {unblock, _ChPid} -> 7; - {run_backing_queue, _Mod, _Fun} -> 6; - sync_timeout -> 6; - _ -> 0 + update_ram_duration -> 8; + delete_immediately -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + {ack, _Txn, _AckTags, _ChPid} -> 7; + {reject, _AckTags, _Requeue, _ChPid} -> 7; + {notify_sent, _ChPid} -> 7; + {unblock, _ChPid} -> 7; + {run_backing_queue, _Mod, _Fun, default} -> 6; + {run_backing_queue, _Mod, _Fun, Priority} -> Priority; + sync_timeout -> 6; + _ -> 0 end. prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, @@ -1079,11 +1081,11 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({run_backing_queue, Mod, Fun}, _From, State) -> +handle_call({run_backing_queue, Mod, Fun, _Priority}, _From, State) -> reply(ok, run_backing_queue(Mod, Fun, State)). -handle_cast({run_backing_queue, Mod, Fun}, State) -> +handle_cast({run_backing_queue, Mod, Fun, _Priority}, State) -> noreply(run_backing_queue(Mod, Fun, State)); handle_cast(sync_timeout, State) -> diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 8ddda1cd..5660112a 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -16,7 +16,7 @@ -module(rabbit_mirror_queue_coordinator). --export([start_link/2, get_gm/1]). +-export([start_link/3, get_gm/1, ensure_monitoring/2]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). @@ -30,7 +30,9 @@ -include("gm_specs.hrl"). -record(state, { q, - gm + gm, + monitors, + death_fun }). -define(ONE_SECOND, 1000). @@ -223,17 +225,20 @@ %% %%---------------------------------------------------------------------------- -start_link(Queue, GM) -> - gen_server2:start_link(?MODULE, [Queue, GM], []). +start_link(Queue, GM, DeathFun) -> + gen_server2:start_link(?MODULE, [Queue, GM, DeathFun], []). get_gm(CPid) -> gen_server2:call(CPid, get_gm, infinity). +ensure_monitoring(CPid, Pids) -> + gen_server2:cast(CPid, {ensure_monitoring, Pids}). + %% --------------------------------------------------------------------------- %% gen_server %% --------------------------------------------------------------------------- -init([#amqqueue { name = QueueName } = Q, GM]) -> +init([#amqqueue { name = QueueName } = Q, GM, DeathFun]) -> GM1 = case GM of undefined -> ok = gm:create_tables(), @@ -248,7 +253,11 @@ init([#amqqueue { name = QueueName } = Q, GM]) -> end, {ok, _TRef} = timer:apply_interval(?ONE_SECOND, gm, broadcast, [GM1, heartbeat]), - {ok, #state { q = Q, gm = GM1 }, hibernate, + {ok, #state { q = Q, + gm = GM1, + monitors = dict:new(), + death_fun = DeathFun }, + hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. handle_call(get_gm, _From, State = #state { gm = GM }) -> @@ -265,7 +274,29 @@ handle_cast({gm_deaths, Deaths}, noreply(State); {error, not_found} -> {stop, normal, State} - end. 
+ end; + +handle_cast({ensure_monitoring, Pids}, + State = #state { monitors = Monitors }) -> + Monitors1 = + lists:foldl(fun (Pid, MonitorsN) -> + case dict:is_key(Pid, MonitorsN) of + true -> MonitorsN; + false -> MRef = erlang:monitor(process, Pid), + dict:store(Pid, MRef, MonitorsN) + end + end, Monitors, Pids), + noreply(State #state { monitors = Monitors1 }). + +handle_info({'DOWN', _MonitorRef, process, Pid, _Reason}, + State = #state { monitors = Monitors, + death_fun = Fun }) -> + noreply( + case dict:is_key(Pid, Monitors) of + false -> State; + true -> ok = Fun(Pid), + State #state { monitors = dict:erase(Pid, Monitors) } + end); handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. @@ -295,6 +326,8 @@ members_changed([CPid], _Births, Deaths) -> handle_msg([_CPid], _From, heartbeat) -> ok; +handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) -> + ok = gen_server2:cast(CPid, Msg); handle_msg([_CPid], _From, _Msg) -> ok. diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index e973ea78..0e7f32f0 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -26,7 +26,7 @@ -export([start/1, stop/0]). --export([promote_backing_queue_state/5]). +-export([promote_backing_queue_state/6, sender_death_fun/0]). -behaviour(rabbit_backing_queue). @@ -39,7 +39,8 @@ set_delivered, seen_status, confirmed, - ack_msg_id + ack_msg_id, + known_senders }). %% For general documentation of HA design, see @@ -58,9 +59,31 @@ stop() -> %% Same as start/1. exit({not_valid_for_generic_backing_queue, ?MODULE}). +sender_death_fun() -> + Self = self(), + fun (DeadPid) -> + %% Purposefully set the priority to 0 here so that we + %% don't overtake any messages from DeadPid that are + %% already in the queue. + rabbit_amqqueue:run_backing_queue_async( + Self, ?MODULE, + fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> + rabbit_log:info("Master saw death of sender ~p~n", [DeadPid]), + case sets:is_element(DeadPid, KS) of + false -> + State; + true -> + ok = gm:broadcast(GM, {sender_death, DeadPid}), + KS1 = sets:del_element(DeadPid, KS), + State #state { known_senders = KS1 } + end + end, 0) + end. + init(#amqqueue { arguments = Args, name = QName } = Q, Recover, AsyncCallback, SyncCallback) -> - {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, undefined), + {ok, CPid} = rabbit_mirror_queue_coordinator:start_link( + Q, undefined, sender_death_fun()), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), Nodes1 = case Nodes of @@ -78,9 +101,10 @@ init(#amqqueue { arguments = Args, name = QName } = Q, Recover, set_delivered = 0, seen_status = dict:new(), confirmed = [], - ack_msg_id = dict:new() }. + ack_msg_id = dict:new(), + known_senders = sets:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus) -> +promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, KS) -> #state { gm = GM, coordinator = CPid, backing_queue = BQ, @@ -88,7 +112,8 @@ promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus) -> set_delivered = BQ:len(BQS), seen_status = SeenStatus, confirmed = [], - ack_msg_id = dict:new() }. + ack_msg_id = dict:new(), + known_senders = sets:from_list(KS) }. terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. 
The queue is going down but @@ -119,7 +144,7 @@ publish(Msg = #basic_message { id = MsgId }, MsgProps, ChPid, false = dict:is_key(MsgId, SS), %% ASSERTION ok = gm:broadcast(GM, {publish, false, ChPid, MsgProps, Msg}), BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State #state { backing_queue_state = BQS1 }. + ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }). publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, ChPid, State = #state { gm = GM, @@ -136,8 +161,9 @@ publish_delivered(AckRequired, Msg = #basic_message { id = MsgId }, MsgProps, {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), AM1 = maybe_store_acktag(AckTag, MsgId, AM), - {AckTag, State #state { backing_queue_state = BQS1, - ack_msg_id = AM1 }}. + {AckTag, + ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1, + ack_msg_id = AM1 })}. dropwhile(Fun, State = #state { gm = GM, backing_queue = BQ, @@ -341,3 +367,12 @@ maybe_store_acktag(undefined, _MsgId, AM) -> AM; maybe_store_acktag(AckTag, MsgId, AM) -> dict:store(AckTag, MsgId, AM). + +ensure_monitoring(ChPid, State = #state { coordinator = CPid, + known_senders = KS }) -> + case sets:is_element(ChPid, KS) of + true -> State; + false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring( + CPid, [ChPid]), + State #state { known_senders = sets:add_element(ChPid, KS) } + end. diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 63a43197..7fc2c8cb 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -49,10 +49,11 @@ -define(SYNC_INTERVAL, 25). %% milliseconds -define(RAM_DURATION_UPDATE_INTERVAL, 5000). +-define(DEATH_TIMEOUT, 20000). %% 20 seconds -record(state, { q, gm, - master_node, + master_pid, backing_queue, backing_queue_state, sync_timer_ref, @@ -62,7 +63,8 @@ msg_id_ack, %% :: MsgId -> AckTag ack_num, - msg_id_status + msg_id_status, + known_senders }). start_link(Q) -> @@ -102,7 +104,7 @@ init([#amqqueue { name = QueueName } = Q]) -> BQS = bq_init(BQ, Q, false), {ok, #state { q = Q, gm = GM, - master_node = node(MPid), + master_pid = MPid, backing_queue = BQ, backing_queue_state = BQS, rate_timer_ref = undefined, @@ -112,7 +114,8 @@ init([#amqqueue { name = QueueName } = Q]) -> msg_id_ack = dict:new(), ack_num = 0, - msg_id_status = dict:new() + msg_id_status = dict:new(), + known_senders = dict:new() }, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. 
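%% Illustrative note, not part of this change: the slave now records
%% the master's pid (master_pid) rather than just its node, so a
%% 'DOWN' message from the master can be told apart from a 'DOWN' for
%% a monitored channel simply by matching the pid against master_pid;
%% the channel case is routed to local_sender_death/2 instead.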
@@ -140,9 +143,9 @@ handle_call({deliver, Delivery = #delivery {}}, From, State) -> noreply(maybe_enqueue_message(Delivery, true, State)); handle_call({gm_deaths, Deaths}, From, - State = #state { q = #amqqueue { name = QueueName }, - gm = GM, - master_node = MNode }) -> + State = #state { q = #amqqueue { name = QueueName }, + gm = GM, + master_pid = MPid }) -> rabbit_log:info("Mirrored-queue (~s): Slave ~s saw deaths of mirrors ~s~n", [rabbit_misc:rs(QueueName), rabbit_misc:pid_to_string(self()), @@ -150,7 +153,7 @@ handle_call({gm_deaths, Deaths}, From, %% The GM has told us about deaths, which means we're not going to %% receive any more messages from GM case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of - {ok, Pid} when node(Pid) =:= MNode -> + {ok, Pid} when node(Pid) =:= node(MPid) -> %% master hasn't changed reply(ok, State); {ok, Pid} when node(Pid) =:= node() -> @@ -161,20 +164,20 @@ handle_call({gm_deaths, Deaths}, From, gen_server2:reply(From, ok), erlang:monitor(process, Pid), ok = gm:broadcast(GM, heartbeat), - noreply(State #state { master_node = node(Pid) }); + noreply(State #state { master_pid = Pid }); {error, not_found} -> gen_server2:reply(From, ok), {stop, normal, State} end; -handle_call({run_backing_queue, Mod, Fun}, _From, State) -> +handle_call({run_backing_queue, Mod, Fun, _Priority}, _From, State) -> reply(ok, run_backing_queue(Mod, Fun, State)); handle_call({commit, _Txn, _ChPid}, _From, State) -> %% We don't support transactions in mirror queues reply(ok, State). -handle_cast({run_backing_queue, Mod, Fun}, State) -> +handle_cast({run_backing_queue, Mod, Fun, _Priority}, State) -> noreply(run_backing_queue(Mod, Fun, State)); handle_cast({gm, Instruction}, State) -> @@ -215,11 +218,14 @@ handle_cast({rollback, _Txn, _ChPid}, State) -> handle_info(timeout, State) -> noreply(backing_queue_timeout(State)); -handle_info({'DOWN', _MonitorRef, process, Pid, _Reason}, - State = #state { gm = GM }) -> - ok = gm:broadcast(GM, {process_death, Pid}), +handle_info({'DOWN', _MonitorRef, process, MPid, _Reason}, + State = #state { gm = GM, master_pid = MPid }) -> + ok = gm:broadcast(GM, {process_death, MPid}), noreply(State); +handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) -> + noreply(local_sender_death(ChPid, State)); + handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. @@ -259,21 +265,23 @@ handle_pre_hibernate(State = #state { backing_queue = BQ, prioritise_call(Msg, _From, _State) -> case Msg of - {run_backing_queue, _Mod, _Fun} -> 6; - {gm_deaths, _Deaths} -> 5; - _ -> 0 + {run_backing_queue, _Mod, _Fun, default} -> 6; + {run_backing_queue, _Mod, _Fun, Priority} -> Priority; + {gm_deaths, _Deaths} -> 5; + _ -> 0 end. prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - {run_backing_queue, _Mod, _Fun} -> 6; - sync_timeout -> 6; - {gm, _Msg} -> 5; - {post_commit, _Txn, _AckTags} -> 4; - _ -> 0 + update_ram_duration -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + {run_backing_queue, _Mod, _Fun, default} -> 6; + {run_backing_queue, _Mod, _Fun, Priority} -> Priority; + sync_timeout -> 6; + {gm, _Msg} -> 5; + {post_commit, _Txn, _AckTags} -> 4; + _ -> 0 end. 
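%% Illustrative note, not part of this change: the extra element in
%% {run_backing_queue, Mod, Fun, Priority} lets the caller pick the
%% gen_server2 priority; 'default' keeps the previous value of 6,
%% while the master's sender_death_fun/0 passes 0 so the cleanup
%% cannot overtake deliveries already queued from the dead sender.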
%% --------------------------------------------------------------------------- @@ -291,6 +299,9 @@ members_changed([SPid], _Births, Deaths) -> handle_msg([_SPid], _From, heartbeat) -> ok; +handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) -> + %% This is only of value to the master + ok; handle_msg([SPid], _From, {process_death, Pid}) -> inform_deaths(SPid, [Pid]); handle_msg([SPid], _From, Msg) -> @@ -327,9 +338,9 @@ bq_init(BQ, Q, Recover) -> end). run_backing_queue(rabbit_mirror_queue_master, Fun, State) -> - %% Yes, this might look a little crazy, but see comments around - %% process_instruction({tx_commit,...}, State). - Fun(rabbit_mirror_queue_master, State); + %% Yes, this might look a little crazy, but see comments in + %% local_sender_death/2 + Fun(?MODULE, State); run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. @@ -392,15 +403,27 @@ promote_me(From, #state { q = Q, rate_timer_ref = RateTRef, sender_queues = SQ, msg_id_ack = MA, - msg_id_status = MS }) -> + msg_id_status = MS, + known_senders = KS }) -> rabbit_log:info("Mirrored-queue (~s): Promoting slave ~s to master~n", [rabbit_misc:rs(Q #amqqueue.name), rabbit_misc:pid_to_string(self())]), - {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q, GM), + Q1 = Q #amqqueue { pid = self() }, + {ok, CPid} = rabbit_mirror_queue_coordinator:start_link( + Q1, GM, rabbit_mirror_queue_master:sender_death_fun()), true = unlink(GM), gen_server2:reply(From, {promote, CPid}), ok = gm:confirmed_broadcast(GM, heartbeat), + %% Everything that we're monitoring, we need to ensure our new + %% coordinator is monitoring. + + MonitoringPids = [begin true = erlang:demonitor(MRef), + Pid + end || {Pid, MRef} <- dict:to_list(KS)], + ok = rabbit_mirror_queue_coordinator:ensure_monitoring( + CPid, MonitoringPids), + %% We find all the messages that we've received from channels but %% not from gm, and if they're due to be enqueued on promotion %% then we pass them to the @@ -472,7 +495,7 @@ promote_me(From, #state { q = Q, Status =:= published orelse Status =:= confirmed]), MasterState = rabbit_mirror_queue_master:promote_backing_queue_state( - CPid, BQ, BQS, GM, SS), + CPid, BQ, BQS, GM, SS, MonitoringPids), MTC = dict:from_list( [{MsgId, {ChPid, MsgSeqNo}} || @@ -482,7 +505,7 @@ promote_me(From, #state { q = Q, Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), {Delivery, true} <- queue:to_list(PubQ)], QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( - Q, rabbit_mirror_queue_master, MasterState, RateTRef, + Q1, rabbit_mirror_queue_master, MasterState, RateTRef, AckTags, Deliveries, MTC), {become, rabbit_amqqueue_process, QueueState, hibernate}. @@ -540,6 +563,52 @@ stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> {ok, cancel} = timer:cancel(TRef), State #state { rate_timer_ref = undefined }. +ensure_monitoring(ChPid, State = #state { known_senders = KS }) -> + case dict:is_key(ChPid, KS) of + true -> State; + false -> MRef = erlang:monitor(process, ChPid), + State #state { known_senders = dict:store(ChPid, MRef, KS) } + end. + +local_sender_death(ChPid, State = #state { known_senders = KS }) -> + case dict:is_key(ChPid, KS) of + false -> + ok; + true -> + %% We have to deal with the possibility that we'll be + %% promoted to master before this thing gets + %% run. 
Consequently we set the module to + %% rabbit_mirror_queue_master so that if we do become a + %% rabbit_amqqueue_process before then, sane things will + %% happen. + Fun = + fun (?MODULE, State1 = #state { known_senders = KS1, + gm = GM }) -> + %% We're running still as a slave + ok = case dict:is_key(ChPid, KS1) of + false -> + ok; + true -> + gm:broadcast( + GM, {ensure_monitoring, [ChPid]}) + end, + State1; + (rabbit_mirror_queue_master, State1) -> + %% We've become a master. State1 is now opaque + %% to us. When we became master, if ChPid was + %% still known to us then we'd have set up + %% monitoring of it then, so this is now a + %% noop. + State1 + end, + %% Note that we do not remove our knowledge of this ChPid + %% until we get the sender_death from GM. + timer:apply_after( + ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue_async, + [self(), rabbit_mirror_queue_master, Fun]) + end, + State. + maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, msg_seq_no = MsgSeqNo, @@ -548,6 +617,7 @@ maybe_enqueue_message( EnqueueOnPromotion, State = #state { sender_queues = SQ, msg_id_status = MS }) -> + State1 = ensure_monitoring(ChPid, State), %% We will never see {published, ChPid, MsgSeqNo} here. case dict:find(MsgId, MS) of error -> @@ -557,30 +627,30 @@ maybe_enqueue_message( end, SQ1 = dict:store(ChPid, queue:in({Delivery, EnqueueOnPromotion}, MQ), SQ), - State #state { sender_queues = SQ1 }; + State1 #state { sender_queues = SQ1 }; {ok, {confirmed, ChPid}} -> %% BQ has confirmed it but we didn't know what the %% msg_seq_no was at the time. We do now! ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - State #state { msg_id_status = dict:erase(MsgId, MS) }; + State1 #state { msg_id_status = dict:erase(MsgId, MS) }; {ok, {published, ChPid}} -> %% It was published to the BQ and we didn't know the %% msg_seq_no so couldn't confirm it at the time. - case needs_confirming(Delivery, State) of + case needs_confirming(Delivery, State1) of never -> - State #state { msg_id_status = dict:erase(MsgId, MS) }; + State1 #state { msg_id_status = dict:erase(MsgId, MS) }; eventually -> - State #state { + State1 #state { msg_id_status = dict:store(MsgId, {published, ChPid, MsgSeqNo}, MS) }; immediately -> ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - State #state { msg_id_status = dict:erase(MsgId, MS) } + State1 #state { msg_id_status = dict:erase(MsgId, MS) } end; {ok, discarded} -> %% We've already heard from GM that the msg is to be %% discarded. We won't see this again. - State #state { msg_id_status = dict:erase(MsgId, MS) } + State1 #state { msg_id_status = dict:erase(MsgId, MS) } end; maybe_enqueue_message(_Delivery, _EnqueueOnPromotion, State) -> %% We don't support txns in mirror queues. @@ -601,6 +671,7 @@ process_instruction( %% which means that we're going to have to hang on to the fact %% that we've seen the msg_id confirmed until we can associate it %% with a msg_seq_no. + State1 = ensure_monitoring(ChPid, State), MS1 = dict:store(MsgId, {published, ChPid}, MS), {SQ1, MS2} = case dict:find(ChPid, SQ) of @@ -618,7 +689,7 @@ process_instruction( %% first. Thus we need to deal with confirms %% here. 
{dict:store(ChPid, MQ1, SQ), - case needs_confirming(Delivery, State) of + case needs_confirming(Delivery, State1) of never -> MS; eventually -> @@ -639,19 +710,19 @@ process_instruction( end end, - State1 = State #state { sender_queues = SQ1, - msg_id_status = MS2 }, + State2 = State1 #state { sender_queues = SQ1, + msg_id_status = MS2 }, {ok, case Deliver of false -> BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS), - State1 #state { backing_queue_state = BQS1 }; + State2 #state { backing_queue_state = BQS1 }; {true, AckRequired} -> {AckTag, BQS1} = BQ:publish_delivered(AckRequired, Msg, MsgProps, ChPid, BQS), maybe_store_ack(AckRequired, MsgId, AckTag, - State1 #state { backing_queue_state = BQS1 }) + State2 #state { backing_queue_state = BQS1 }) end}; process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, State = #state { sender_queues = SQ, @@ -660,6 +731,7 @@ process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, msg_id_status = MS }) -> %% Many of the comments around the publish head above apply here %% too. + State1 = ensure_monitoring(ChPid, State), MS1 = dict:store(MsgId, discarded, MS), {SQ1, MS2} = case dict:find(ChPid, SQ) of @@ -685,9 +757,9 @@ process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, end end, BQS1 = BQ:discard(Msg, ChPid, BQS), - {ok, State #state { sender_queues = SQ1, - msg_id_status = MS2, - backing_queue_state = BQS1 }}; + {ok, State1 #state { sender_queues = SQ1, + msg_id_status = MS2, + backing_queue_state = BQS1 }}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> @@ -746,6 +818,19 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, State #state { msg_id_ack = dict:new(), backing_queue_state = BQS2 } end}; +process_instruction({sender_death, ChPid}, + State = #state { sender_queues = SQ, + known_senders = KS }) -> + rabbit_log:info("Slave received death of sender ~p~n", [ChPid]), + {ok, case dict:find(ChPid, KS) of + error -> + State; + {ok, MRef} -> + true = erlang:demonitor(MRef), + KS1 = dict:erase(ChPid, KS), + SQ1 = dict:erase(ChPid, SQ), + State #state { sender_queues = SQ1, known_senders = KS1} + end}; process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> -- cgit v1.2.1 From b7a5e36688ba5f1bc680717675c418112ec4c081 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 20 May 2011 18:34:00 +0100 Subject: -- --- src/rabbit_trace.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 1f9fe932..0fb1faba 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -63,10 +63,10 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, %%---------------------------------------------------------------------------- start(VHost) -> - update_config(fun (VHosts) -> [VHost | lists:delete(VHost, VHosts)] end). + update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end). stop(VHost) -> - update_config(fun (VHosts) -> lists:delete(VHost, VHosts) end). + update_config(fun (VHosts) -> [VHosts -- [VHost]] end). 
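%% Illustrative note, not part of this change: "VHosts -- [VHost]"
%% removes the first occurrence of VHost from VHosts, so the new
%% start/1 body moves the vhost to the head of the list without
%% duplicating it, e.g.
%%   [<<"/">> | [<<"a">>, <<"/">>] -- [<<"/">>]] =:= [<<"/">>, <<"a">>].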
update_config(Fun) -> {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS), -- cgit v1.2.1 From 1270b265d77799c97af7ec6f16a7637ed4caefda Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 20 May 2011 18:39:07 +0100 Subject: Critical bug fixed --- src/rabbit_mirror_queue_slave.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 7fc2c8cb..f065f667 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -829,7 +829,7 @@ process_instruction({sender_death, ChPid}, true = erlang:demonitor(MRef), KS1 = dict:erase(ChPid, KS), SQ1 = dict:erase(ChPid, SQ), - State #state { sender_queues = SQ1, known_senders = KS1} + State #state { sender_queues = SQ1, known_senders = KS1 } end}; process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, -- cgit v1.2.1 From 6be969542911167c767b4298c98c7a31ce505138 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 09:38:44 +0100 Subject: Fix dumb mistake. --- src/rabbit_trace.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 0fb1faba..172e8a7a 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -66,7 +66,7 @@ start(VHost) -> update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end). stop(VHost) -> - update_config(fun (VHosts) -> [VHosts -- [VHost]] end). + update_config(fun (VHosts) -> VHosts -- [VHost] end). update_config(Fun) -> {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS), -- cgit v1.2.1 From 29fcb934aae22257c38ffbb8a20834e180f628f4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 10:20:08 +0100 Subject: Actually use the exchange we have. Not very scientifically measured, but this seems to knock about 1s off the time to to "time MulticastMain -x 2 -y 0 -z 30" with tracing on (from ~82s to ~81s for me). --- src/rabbit_basic.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 91bdf826..9397905f 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -169,10 +169,10 @@ publish(Exchange, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, BodyBin) -> case exchange(Exchange) of X = #exchange{} -> - publish(delivery(Mandatory, Immediate, Txn, - message(X#exchange.name, RoutingKeyBin, - properties(Properties), BodyBin), - undefined)); + publish(X, delivery(Mandatory, Immediate, Txn, + message(X#exchange.name, RoutingKeyBin, + properties(Properties), BodyBin), + undefined)); _ -> {ok, unroutable, []} end. -- cgit v1.2.1 From 7a45bdea49398a76cd6018eb790ba55743eb7a93 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 10:25:02 +0100 Subject: Update docs. --- docs/rabbitmqctl.1.xml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 0e212f10..4d801ef1 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1269,17 +1269,13 @@ Message Tracing - start_tracing -p vhost exchange + trace_on -p vhost vhost The name of the virtual host for which to start tracing. - - value - The name of the exchange to which trace messages should be published. - Starts tracing. 
@@ -1288,7 +1284,7 @@ - stop_tracing -p vhost + trace_off -p vhost -- cgit v1.2.1 From 720455453f03f3a722290bd17a06fc1b1546b077 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 11:11:24 +0100 Subject: Don't display it as binary. --- src/rabbit_control.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 52cfac9b..e5251438 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -283,14 +283,14 @@ action(list_consumers, Node, _Args, Opts, Inform) -> end; action(trace_on, Node, [], Opts, Inform) -> - VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Starting tracing for vhost ~p", [VHost]), - rpc_call(Node, rabbit_trace, start, [VHost]); + rpc_call(Node, rabbit_trace, start, [list_to_binary(VHost)]); action(trace_off, Node, [], Opts, Inform) -> - VHost = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Stopping tracing for vhost ~p", [VHost]), - rpc_call(Node, rabbit_trace, stop, [VHost]); + rpc_call(Node, rabbit_trace, stop, [list_to_binary(VHost)]); action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), -- cgit v1.2.1 From 792e267c9cffb409e9ea3d70af049e8eff04007e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 11:12:32 +0100 Subject: Abstract the types --- src/rabbit_basic.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 9397905f..cccd028a 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -32,6 +32,9 @@ ({ok, rabbit_router:routing_result(), [pid()]} | rabbit_types:error('not_found'))). +-type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())). +-type(body_input() :: (binary() | [binary()])). + -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). -spec(delivery/5 :: @@ -48,14 +51,12 @@ -spec(properties/1 :: (properties_input()) -> rabbit_framing:amqp_property_record()). -spec(publish/4 :: - (rabbit_types:exchange() | rabbit_exchange:name(), - rabbit_router:routing_key(), properties_input(), - binary() | [binary()]) -> publish_result()). + (exchange_input(), rabbit_router:routing_key(), properties_input(), + body_input()) -> publish_result()). -spec(publish/7 :: - (rabbit_types:exchange() | rabbit_exchange:name(), - rabbit_router:routing_key(), boolean(), boolean(), + (exchange_input(), rabbit_router:routing_key(), boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), properties_input(), - binary() | [binary()]) -> publish_result()). + body_input()) -> publish_result()). -spec(build_content/2 :: (rabbit_framing:amqp_property_record(), binary() | [binary()]) -> rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> -- cgit v1.2.1 From a9512f75c00d7b36cf83f8197cdbb67e7f21d915 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 11:12:52 +0100 Subject: Rename and move things a bit --- src/rabbit_basic.erl | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index cccd028a..1c781727 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -73,10 +73,6 @@ publish(Delivery = #delivery{ Other -> Other end. 
-publish(X, Delivery) -> - {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), - {ok, RoutingRes, DeliveredQPids}. - delivery(Mandatory, Immediate, Txn, Message, MsgSeqNo) -> #delivery{mandatory = Mandatory, immediate = Immediate, txn = Txn, sender = self(), message = Message, msg_seq_no = MsgSeqNo}. @@ -160,28 +156,31 @@ indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(Exchange, RoutingKeyBin, Properties, BodyBin) -> +publish(Exchange, RoutingKeyBin, Properties, Body) -> publish(Exchange, RoutingKeyBin, false, false, none, Properties, - BodyBin). + Body). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(Exchange, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, - BodyBin) -> +publish(Exchange, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, Body) -> case exchange(Exchange) of - X = #exchange{} -> + X = #exchange{name = XName} -> publish(X, delivery(Mandatory, Immediate, Txn, - message(X#exchange.name, RoutingKeyBin, - properties(Properties), BodyBin), + message(XName, RoutingKeyBin, + properties(Properties), Body), undefined)); - _ -> - {ok, unroutable, []} + Err -> + Err end. +publish(X, Delivery) -> + {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), + {ok, RoutingRes, DeliveredQPids}. + exchange(X = #exchange{}) -> X; -exchange(N = #resource{kind = exchange}) -> - case rabbit_exchange:lookup(N) of +exchange(XName = #resource{kind = exchange}) -> + case rabbit_exchange:lookup(XName) of {ok, X} -> X; Err -> Err end. -- cgit v1.2.1 From f6faad9903a5e9b612345bd3f790c998af75e188 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 11:17:18 +0100 Subject: Inline exchange/1, multihead publish/7 --- src/rabbit_basic.erl | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index 1c781727..fa7e3a5a 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -162,29 +162,21 @@ publish(Exchange, RoutingKeyBin, Properties, Body) -> %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(Exchange, RoutingKeyBin, Mandatory, Immediate, Txn, Properties, Body) -> - case exchange(Exchange) of - X = #exchange{name = XName} -> - publish(X, delivery(Mandatory, Immediate, Txn, - message(XName, RoutingKeyBin, - properties(Properties), Body), - undefined)); - Err -> - Err +publish(X = #exchange{name = XName}, RKey, Mandatory, Immediate, Txn, + Props, Body) -> + publish(X, delivery(Mandatory, Immediate, Txn, + message(XName, RKey, properties(Props), Body), + undefined)); +publish(XName, RKey, Mandatory, Immediate, Txn, Props, Body) -> + case rabbit_exchange:lookup(XName) of + {ok, X} -> publish(X, RKey, Mandatory, Immediate, Txn, Props, Body); + Err -> Err end. publish(X, Delivery) -> {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), {ok, RoutingRes, DeliveredQPids}. -exchange(X = #exchange{}) -> - X; -exchange(XName = #resource{kind = exchange}) -> - case rabbit_exchange:lookup(XName) of - {ok, X} -> X; - Err -> Err - end. 
- is_message_persistent(#content{properties = #'P_basic'{ delivery_mode = Mode}}) -> case Mode of -- cgit v1.2.1 From 6f0b94e30c826c9656eddbd8d9532506c7c033c7 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 12:18:07 +0100 Subject: Exclude mochiweb from the xref check. --- src/rabbit_prelaunch.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 2512a602..8ae86848 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -71,7 +71,9 @@ start() -> %% Compile the script ScriptFile = RootName ++ ".script", - case systools:make_script(RootName, [local, silent, exref]) of + %% We exclude mochiweb due to its optional use of fdsrv. + case systools:make_script(RootName, [local, silent, + {exref, AllApps -- [mochiweb]}]) of {ok, Module, Warnings} -> %% This gets lots of spurious no-source warnings when we %% have .ez files, so we want to supress them to prevent -- cgit v1.2.1 From 64d9405597387f8a3fcd81e27b861655134b4c6a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 12:22:21 +0100 Subject: Introduce variable --- src/rabbit_prelaunch.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 8ae86848..1dbff8c6 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -69,11 +69,13 @@ start() -> %% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel rabbit_misc:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), + %% We exclude mochiweb due to its optional use of fdsrv. + Exclude = [mochiweb], + %% Compile the script ScriptFile = RootName ++ ".script", - %% We exclude mochiweb due to its optional use of fdsrv. case systools:make_script(RootName, [local, silent, - {exref, AllApps -- [mochiweb]}]) of + {exref, AllApps -- Exclude}]) of {ok, Module, Warnings} -> %% This gets lots of spurious no-source warnings when we %% have .ez files, so we want to supress them to prevent -- cgit v1.2.1 From 7ec3424dfeb4b0fb109c191815004566cea008e5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 12:25:05 +0100 Subject: Rename variable --- src/rabbit_prelaunch.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 1dbff8c6..35cb650e 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -70,12 +70,12 @@ start() -> rabbit_misc:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])), %% We exclude mochiweb due to its optional use of fdsrv. - Exclude = [mochiweb], + XRefExclude = [mochiweb], %% Compile the script ScriptFile = RootName ++ ".script", case systools:make_script(RootName, [local, silent, - {exref, AllApps -- Exclude}]) of + {exref, AllApps -- XRefExclude}]) of {ok, Module, Warnings} -> %% This gets lots of spurious no-source warnings when we %% have .ez files, so we want to supress them to prevent -- cgit v1.2.1 From fc6be65ada353aa98780a3b89139f18727c9492d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 12:39:49 +0100 Subject: Since we should not normally see warnings now, let's zhuzh them up. 
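A sketch of the intended effect (illustration only; the warning text is
a made-up example, and WarningStr is assumed to end with a newline):

    S = string:copies("*", 80),
    io:format("~n~s~n~s~s~n~n", [S, "no source for module foo\n", S]).

which frames the warnings between two 80-character rows of asterisks.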
--- src/rabbit_prelaunch.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl index 35cb650e..92829e49 100644 --- a/src/rabbit_prelaunch.erl +++ b/src/rabbit_prelaunch.erl @@ -97,7 +97,8 @@ start() -> end]), case length(WarningStr) of 0 -> ok; - _ -> io:format("~s", [WarningStr]) + _ -> S = string:copies("*", 80), + io:format("~n~s~n~s~s~n~n", [S, WarningStr, S]) end, ok; {error, Module, Error} -> -- cgit v1.2.1 From cc4012012d860425781ea1e8da3f8ec6ce8c9c39 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 13:27:31 +0100 Subject: master must broadcast, esp if it doesn't know about the sender. Also rip out the varying priority run_backing_queue* stuff as it turns out it's not needed --- src/rabbit_amqqueue.erl | 19 ++-------------- src/rabbit_amqqueue_process.erl | 44 ++++++++++++++++++-------------------- src/rabbit_mirror_queue_master.erl | 16 ++++---------- src/rabbit_mirror_queue_slave.erl | 28 +++++++++++------------- 4 files changed, 40 insertions(+), 67 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 0550f13b..8c374ef3 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -33,7 +33,6 @@ %% internal -export([internal_declare/2, internal_delete/1, run_backing_queue/3, run_backing_queue_async/3, - run_backing_queue/4, run_backing_queue_async/4, sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1, emit_stats/1]). @@ -150,14 +149,6 @@ -spec(run_backing_queue_async/3 :: (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). --spec(run_backing_queue/4 :: - (pid(), atom(), - (fun ((atom(), A) -> {[rabbit_types:msg_id()], A})), - integer() | 'default') -> 'ok'). --spec(run_backing_queue_async/4 :: - (pid(), atom(), - (fun ((atom(), A) -> {[rabbit_types:msg_id()], A})), - integer() | 'default') -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). @@ -457,16 +448,10 @@ internal_delete(QueueName) -> end). run_backing_queue(QPid, Mod, Fun) -> - run_backing_queue(QPid, Mod, Fun, default). + gen_server2:call(QPid, {run_backing_queue, Mod, Fun}, infinity). run_backing_queue_async(QPid, Mod, Fun) -> - run_backing_queue_async(QPid, Mod, Fun, default). - -run_backing_queue(QPid, Mod, Fun, Priority) -> - gen_server2:call(QPid, {run_backing_queue, Mod, Fun, Priority}, infinity). - -run_backing_queue_async(QPid, Mod, Fun, Priority) -> - gen_server2:cast(QPid, {run_backing_queue, Mod, Fun, Priority}). + gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 7daf869b..ea31ec13 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -843,31 +843,29 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {run_backing_queue, _Mod, _Fun, default} -> 6; - {run_backing_queue, _Mod, _Fun, Priority} -> Priority; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Mod, _Fun} -> 6; + _ -> 0 end. 
prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - delete_immediately -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; - {ack, _Txn, _AckTags, _ChPid} -> 7; - {reject, _AckTags, _Requeue, _ChPid} -> 7; - {notify_sent, _ChPid} -> 7; - {unblock, _ChPid} -> 7; - {run_backing_queue, _Mod, _Fun, default} -> 6; - {run_backing_queue, _Mod, _Fun, Priority} -> Priority; - sync_timeout -> 6; - _ -> 0 + update_ram_duration -> 8; + delete_immediately -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + {ack, _Txn, _AckTags, _ChPid} -> 7; + {reject, _AckTags, _Requeue, _ChPid} -> 7; + {notify_sent, _ChPid} -> 7; + {unblock, _ChPid} -> 7; + {run_backing_queue, _Mod, _Fun} -> 6; + sync_timeout -> 6; + _ -> 0 end. prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, @@ -1081,11 +1079,11 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> noreply(requeue_and_run(AckTags, State)) end; -handle_call({run_backing_queue, Mod, Fun, _Priority}, _From, State) -> +handle_call({run_backing_queue, Mod, Fun}, _From, State) -> reply(ok, run_backing_queue(Mod, Fun, State)). -handle_cast({run_backing_queue, Mod, Fun, _Priority}, State) -> +handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); handle_cast(sync_timeout, State) -> diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 0e7f32f0..78c771cc 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -62,22 +62,14 @@ stop() -> sender_death_fun() -> Self = self(), fun (DeadPid) -> - %% Purposefully set the priority to 0 here so that we - %% don't overtake any messages from DeadPid that are - %% already in the queue. rabbit_amqqueue:run_backing_queue_async( Self, ?MODULE, fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> rabbit_log:info("Master saw death of sender ~p~n", [DeadPid]), - case sets:is_element(DeadPid, KS) of - false -> - State; - true -> - ok = gm:broadcast(GM, {sender_death, DeadPid}), - KS1 = sets:del_element(DeadPid, KS), - State #state { known_senders = KS1 } - end - end, 0) + ok = gm:broadcast(GM, {sender_death, DeadPid}), + KS1 = sets:del_element(DeadPid, KS), + State #state { known_senders = KS1 } + end) end. init(#amqqueue { arguments = Args, name = QName } = Q, Recover, diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index f065f667..265657de 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -170,14 +170,14 @@ handle_call({gm_deaths, Deaths}, From, {stop, normal, State} end; -handle_call({run_backing_queue, Mod, Fun, _Priority}, _From, State) -> +handle_call({run_backing_queue, Mod, Fun}, _From, State) -> reply(ok, run_backing_queue(Mod, Fun, State)); handle_call({commit, _Txn, _ChPid}, _From, State) -> %% We don't support transactions in mirror queues reply(ok, State). 
-handle_cast({run_backing_queue, Mod, Fun, _Priority}, State) -> +handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); handle_cast({gm, Instruction}, State) -> @@ -265,23 +265,21 @@ handle_pre_hibernate(State = #state { backing_queue = BQ, prioritise_call(Msg, _From, _State) -> case Msg of - {run_backing_queue, _Mod, _Fun, default} -> 6; - {run_backing_queue, _Mod, _Fun, Priority} -> Priority; - {gm_deaths, _Deaths} -> 5; - _ -> 0 + {run_backing_queue, _Mod, _Fun} -> 6; + {gm_deaths, _Deaths} -> 5; + _ -> 0 end. prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; - {set_ram_duration_target, _Duration} -> 8; - {set_maximum_since_use, _Age} -> 8; - {run_backing_queue, _Mod, _Fun, default} -> 6; - {run_backing_queue, _Mod, _Fun, Priority} -> Priority; - sync_timeout -> 6; - {gm, _Msg} -> 5; - {post_commit, _Txn, _AckTags} -> 4; - _ -> 0 + update_ram_duration -> 8; + {set_ram_duration_target, _Duration} -> 8; + {set_maximum_since_use, _Age} -> 8; + {run_backing_queue, _Mod, _Fun} -> 6; + sync_timeout -> 6; + {gm, _Msg} -> 5; + {post_commit, _Txn, _AckTags} -> 4; + _ -> 0 end. %% --------------------------------------------------------------------------- -- cgit v1.2.1 From 1dab0e6045a444b91da762286d4f164f050dd6c7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 13:29:35 +0100 Subject: Remove debug log entries --- src/rabbit_mirror_queue_master.erl | 1 - src/rabbit_mirror_queue_slave.erl | 1 - 2 files changed, 2 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 78c771cc..1d2b1676 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -65,7 +65,6 @@ sender_death_fun() -> rabbit_amqqueue:run_backing_queue_async( Self, ?MODULE, fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> - rabbit_log:info("Master saw death of sender ~p~n", [DeadPid]), ok = gm:broadcast(GM, {sender_death, DeadPid}), KS1 = sets:del_element(DeadPid, KS), State #state { known_senders = KS1 } diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 265657de..5c0730dd 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -819,7 +819,6 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, process_instruction({sender_death, ChPid}, State = #state { sender_queues = SQ, known_senders = KS }) -> - rabbit_log:info("Slave received death of sender ~p~n", [ChPid]), {ok, case dict:find(ChPid, KS) of error -> State; -- cgit v1.2.1 From 25b2dbe483f1e71efb080a0b7e8e92525b36adfb Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 15:00:50 +0100 Subject: Updated documentation, and in the course of writing it, thought up another scenario I wasn't coping with. Fixed. However, not all documented causes of memory leaks are yet fixed in the code --- src/rabbit_mirror_queue_coordinator.erl | 109 ++++++++++++++++++++++++++++---- src/rabbit_mirror_queue_slave.erl | 69 ++++++++++---------- 2 files changed, 128 insertions(+), 50 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 5660112a..96d0e15b 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -158,20 +158,101 @@ %% as the bq, and the slave's bq as the master's bq. Thus the very %% same process that was the slave is now a full amqqueue_process. 
%% -%% In the event of channel failure, there is the possibility that a -%% msg that was being published only makes it to some of the -%% mirrors. If it makes it to the master, then the master will push -%% the entire message onto gm, and all the slaves will publish it to -%% their bq, even though they may not receive it directly from the -%% channel. This currently will create a small memory leak in the -%% slave's msg_id_status mapping as the slaves will expect that -%% eventually they'll receive the msg from the channel. If the message -%% does not make it to the master then the slaves that receive it will -%% hold onto the message, assuming it'll eventually appear via -%% gm. Again, this will currently result in a memory leak, though this -%% time, it's the entire message rather than tracking the status of -%% the message, which is potentially much worse. This may eventually -%% be solved by monitoring publishing channels in some way. +%% It is important that we avoid memory leaks due to the death of +%% senders (i.e. channels) and partial publications. A sender +%% publishing a message may fail mid way through the publish and thus +%% only some of the mirrors will receive the message. We need the +%% mirrors to be able to detect this and tidy up as necessary to avoid +%% leaks. If we just had the master monitoring all senders then we +%% would have the possibility that a sender appears and only sends the +%% message to a few of the slaves before dying. Those slaves would +%% then hold on to the message, assuming they'll receive some +%% instruction eventually from the master. Thus we have both slaves +%% and the master monitor all senders they become aware of. But there +%% is a race: if the slave receives a DOWN of a sender, how does it +%% know whether or not the master is going to send it instructions +%% regarding those messages? +%% +%% Whilst the master monitors senders, it can't access its mailbox +%% directly, so it delegates monitoring to the coordinator. When the +%% coordinator receives a DOWN message from a sender, it informs the +%% master via a callback. This allows the master to do any tidying +%% necessary, but more importantly allows the master to broadcast a +%% sender_death message to all the slaves, saying the sender has +%% died. Once the slaves receive the sender_death message, they know +%% that they're not going to receive any more instructions from the gm +%% regarding that sender, thus they throw away any publications from +%% the sender pending publication instructions. However, it is +%% possible that the coordinator receives the DOWN and communicates +%% that to the master before the master has finished receiving and +%% processing publishes from the sender. This turns out not to be a +%% problem: the sender has actually died, and so will not need to +%% receive confirms or other feedback, and should further messages be +%% "received" from the sender, the master will ask the coordinator to +%% set up a new monitor, and will continue to process the messages +%% normally. Slaves may thus receive publishes via gm from previously +%% declared "dead" senders, but again, this is fine: should the slave +%% have just thrown out the message it had received directly from the +%% sender (due to receiving a sender_death message via gm), it will be +%% able to cope with the publication purely from the master via gm. 
+%% +%% When a slave receives a DOWN message for a sender, if it has not +%% received the sender_death message from the master via gm already, +%% then it will wait 20 seconds before broadcasting a request for +%% confirmation from the master that the sender really has died. +%% Should a sender have only sent a publish to slaves, this allows +%% slaves to inform the master of the previous existence of the +%% sender. The master will thus monitor the sender, receive the DOWN, +%% and subsequently broadcast the sender_death message, allowing the +%% slaves to tidy up. This process can repeat for the same sender: +%% consider one slave receives the publication, then the DOWN, then +%% asks for confirmation of death, then the master broadcasts the +%% sender_death message. Only then does another slave receive the +%% publication and thus set up its monitoring. Eventually that slave +%% too will receive the DOWN, ask for confirmation and the master will +%% monitor the sender again, receive another DOWN, and send out +%% another sender_death message. Given the 20 second delay before +%% requesting death confirmation, this is highly unlikely, but it is a +%% possibility. +%% +%% When the 20 second timer expires, the slave first checks to see +%% whether it still needs confirmation of the death before requesting +%% it. This prevents unnecessary traffic on gm as it allows one +%% broadcast of the sender_death message to satisfy many slaves. +%% +%% If we consider the promotion of a slave at this point, we have two +%% possibilities: that of the slave that has received the DOWN and is +%% thus waiting for confirmation from the master that the sender +%% really is down; and that of the slave that has not received the +%% DOWN. In the first case, in the act of promotion to master, the new +%% master will monitor again the dead sender, and after it has +%% finished promoting itself, it should find another DOWN waiting, +%% which it will then broadcast. This will allow slaves to tidy up as +%% normal. In the second case, we have the possibility that +%% confirmation-of-sender-death request has been broadcast, but that +%% it was broadcast before the master failed, and that the slave being +%% promoted does not know anything about that sender, and so will not +%% monitor it on promotion. Thus a slave that broadcasts such a +%% request, at the point of broadcasting it, recurses, setting another +%% 20 second timer. As before, on expiry of the timer, the slaves +%% checks to see whether it still has not received a sender_death +%% message for the dead sender, and if not, broadcasts a death +%% confirmation request. Thus this ensures that even when a master +%% dies and the new slave has no knowledge of the dead sender, it will +%% eventually receive a death confirmation request, shall monitor the +%% dead sender, receive the DOWN and broadcast the sender_death +%% message. +%% +%% The preceding commentary deals with the possibility of slaves +%% receiving publications from senders which the master does not, and +%% the need to prevent memory leaks in such scenarios. The inverse is +%% also possible: a partial publication may cause only the master to +%% receive a publication. It will then publish the message via gm. The +%% slaves will receive it via gm, will publish it to their BQ and will +%% set up monitoring on the sender. They will then receive the DOWN +%% message and the master will eventually publish the corresponding +%% sender_death message. 
The slave will then be able to tidy up its +%% state as normal. %% %% We don't support transactions on mirror queues. To do so is %% challenging. The underlying bq is free to add the contents of the diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 5c0730dd..558e372e 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -569,44 +569,41 @@ ensure_monitoring(ChPid, State = #state { known_senders = KS }) -> end. local_sender_death(ChPid, State = #state { known_senders = KS }) -> - case dict:is_key(ChPid, KS) of - false -> - ok; - true -> - %% We have to deal with the possibility that we'll be - %% promoted to master before this thing gets - %% run. Consequently we set the module to - %% rabbit_mirror_queue_master so that if we do become a - %% rabbit_amqqueue_process before then, sane things will - %% happen. - Fun = - fun (?MODULE, State1 = #state { known_senders = KS1, - gm = GM }) -> - %% We're running still as a slave - ok = case dict:is_key(ChPid, KS1) of - false -> - ok; - true -> - gm:broadcast( - GM, {ensure_monitoring, [ChPid]}) - end, - State1; - (rabbit_mirror_queue_master, State1) -> - %% We've become a master. State1 is now opaque - %% to us. When we became master, if ChPid was - %% still known to us then we'd have set up - %% monitoring of it then, so this is now a - %% noop. - State1 - end, - %% Note that we do not remove our knowledge of this ChPid - %% until we get the sender_death from GM. - timer:apply_after( - ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue_async, - [self(), rabbit_mirror_queue_master, Fun]) - end, + ok = case dict:is_key(ChPid, KS) of + false -> ok; + true -> confirm_sender_death(ChPid) + end, State. +confirm_sender_death(Pid) -> + %% We have to deal with the possibility that we'll be promoted to + %% master before this thing gets run. Consequently we set the + %% module to rabbit_mirror_queue_master so that if we do become a + %% rabbit_amqqueue_process before then, sane things will happen. + Fun = + fun (?MODULE, State = #state { known_senders = KS, + gm = GM }) -> + %% We're running still as a slave + ok = case dict:is_key(Pid, KS) of + false -> ok; + true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}), + confirm_sender_death(Pid) + end, + State; + (rabbit_mirror_queue_master, State) -> + %% We've become a master. State is now opaque to + %% us. When we became master, if Pid was still known + %% to us then we'd have set up monitoring of it then, + %% so this is now a noop. + State + end, + %% Note that we do not remove our knowledge of this ChPid until we + %% get the sender_death from GM. + {ok, _TRef} = timer:apply_after( + ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue_async, + [self(), rabbit_mirror_queue_master, Fun]), + ok. + maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, msg_seq_no = MsgSeqNo, -- cgit v1.2.1 From 26097dd85c3bf96fc6ddf8f5d7f79b8e42571984 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 23 May 2011 15:06:10 +0100 Subject: Add a "tracing" column to the list_vhosts command. A bigger change than expected, as vhosts didn't have the standard info item infrastructure. 
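The new column is driven by the same infos/i info-item pattern that queues,
exchanges and connections already use; vhosts simply gain their own
?INFO_KEYS and i/2 clauses. In outline (a simplified sketch of the
rabbit_vhost code added below):

    -define(INFO_KEYS, [name, tracing]).

    infos(Items, VHost) -> [{Item, i(Item, VHost)} || Item <- Items].

    i(name,    VHost) -> VHost;
    i(tracing, VHost) -> rabbit_trace:tracing(VHost);
    i(Item,    _)     -> throw({bad_argument, Item}).

rabbitmqctl then asks for whichever columns the user names, defaulting to
just the name, e.g.

    rabbitmqctl list_vhosts name tracing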
--- docs/rabbitmqctl.1.xml | 23 ++++++++++++++++++++--- src/rabbit_control.erl | 5 +++-- src/rabbit_trace.erl | 10 +++++++--- src/rabbit_vhost.erl | 23 +++++++++++++++++++++++ 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 4d801ef1..ffa01894 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -624,14 +624,31 @@ - - list_vhosts + + list_vhosts vhostinfoitem ... Lists virtual hosts. + + The vhostinfoitem parameter is used to indicate which + virtual host information items to include in the results. The column order in the + results will match the order of the parameters. + vhostinfoitem can take any value from + the list that follows: + + + + name + The name of the virtual host with non-ASCII characters escaped as in C. + + + tracing + Whether tracing is enabled for this virtual host. + + For example: - rabbitmqctl list_vhosts + rabbitmqctl list_vhosts name tracing This command instructs the RabbitMQ broker to list all virtual hosts. diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index e5251438..8172f804 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -221,9 +221,10 @@ action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) -> Inform("Deleting vhost ~p", Args), call(Node, {rabbit_vhost, delete, Args}); -action(list_vhosts, Node, [], _Opts, Inform) -> +action(list_vhosts, Node, Args, _Opts, Inform) -> Inform("Listing vhosts", []), - display_list(call(Node, {rabbit_vhost, list, []})); + ArgAtoms = default_if_empty(Args, [name]), + display_info_list(call(Node, {rabbit_vhost, info_all, []}), ArgAtoms); action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) -> Inform("Listing permissions for user ~p", Args), diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl index 172e8a7a..7d36856a 100644 --- a/src/rabbit_trace.erl +++ b/src/rabbit_trace.erl @@ -16,7 +16,7 @@ -module(rabbit_trace). --export([init/1, tap_trace_in/2, tap_trace_out/2, start/1, stop/1]). +-export([init/1, tracing/1, tap_trace_in/2, tap_trace_out/2, start/1, stop/1]). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). @@ -31,6 +31,7 @@ -type(state() :: rabbit_types:exchange() | 'none'). -spec(init/1 :: (rabbit_types:vhost()) -> state()). +-spec(tracing/1 :: (rabbit_types:vhost()) -> boolean()). -spec(tap_trace_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok'). -spec(tap_trace_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok'). @@ -42,14 +43,17 @@ %%---------------------------------------------------------------------------- init(VHost) -> - {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS), - case lists:member(VHost, VHosts) of + case tracing(VHost) of false -> none; true -> {ok, X} = rabbit_exchange:lookup( rabbit_misc:r(VHost, exchange, ?XNAME)), X end. +tracing(VHost) -> + {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS), + lists:member(VHost, VHosts). + tap_trace_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, TraceX) -> maybe_trace(TraceX, Msg, <<"publish">>, XName, []). diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index 67c73cf2..5270d80b 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -21,6 +21,7 @@ %%---------------------------------------------------------------------------- -export([add/1, delete/1, exists/1, list/0, with/2]). +-export([info/1, info/2, info_all/0, info_all/1]). -ifdef(use_specs). @@ -30,10 +31,18 @@ -spec(list/0 :: () -> [rabbit_types:vhost()]). 
-spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A). +-spec(info/1 :: (rabbit_types:vhost()) -> rabbit_types:infos()). +-spec(info/2 :: (rabbit_types:vhost(), rabbit_types:info_keys()) + -> rabbit_types:infos()). +-spec(info_all/0 :: () -> [rabbit_types:infos()]). +-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). + -endif. %%---------------------------------------------------------------------------- +-define(INFO_KEYS, [name, tracing]). + add(VHostPath) -> R = rabbit_misc:execute_mnesia_transaction( fun () -> @@ -105,3 +114,17 @@ with(VHostPath, Thunk) -> Thunk() end end. + +%%---------------------------------------------------------------------------- + +infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items]. + +i(name, VHost) -> VHost; +i(tracing, VHost) -> rabbit_trace:tracing(VHost); +i(Item, _) -> throw({bad_argument, Item}). + +info(VHost) -> infos(?INFO_KEYS, VHost). +info(VHost, Items) -> infos(Items, VHost). + +info_all() -> info_all(?INFO_KEYS). +info_all(Items) -> [info(VHost, Items) || VHost <- list()]. -- cgit v1.2.1 From 6195f8752ee4b2775c3697c0862969d9a4e7005a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 16:45:27 +0100 Subject: Fix the remaining memory leak --- src/rabbit_mirror_queue_slave.erl | 172 +++++++++++++++++++++----------------- 1 file changed, 93 insertions(+), 79 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 558e372e..b6aaecb7 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -59,7 +59,7 @@ sync_timer_ref, rate_timer_ref, - sender_queues, %% :: Pid -> MsgQ + sender_queues, %% :: Pid -> {Q {Msg, Bool}, Set MsgId} msg_id_ack, %% :: MsgId -> AckTag ack_num, @@ -500,7 +500,7 @@ promote_me(From, #state { q = Q, {MsgId, {published, ChPid, MsgSeqNo}} <- dict:to_list(MS)]), NumAckTags = [NumAckTag || {_MsgId, NumAckTag} <- dict:to_list(MA)], AckTags = [AckTag || {_Num, AckTag} <- lists:sort(NumAckTags)], - Deliveries = [Delivery || {_ChPid, PubQ} <- dict:to_list(SQ), + Deliveries = [Delivery || {_ChPid, {PubQ, _PendCh}} <- dict:to_list(SQ), {Delivery, true} <- queue:to_list(PubQ)], QueueState = rabbit_amqqueue_process:init_with_backing_queue_state( Q1, rabbit_mirror_queue_master, MasterState, RateTRef, @@ -610,47 +610,65 @@ maybe_enqueue_message( sender = ChPid, txn = none }, EnqueueOnPromotion, - State = #state { sender_queues = SQ, - msg_id_status = MS }) -> + State = #state { sender_queues = SQ, msg_id_status = MS }) -> State1 = ensure_monitoring(ChPid, State), %% We will never see {published, ChPid, MsgSeqNo} here. case dict:find(MsgId, MS) of error -> - MQ = case dict:find(ChPid, SQ) of - {ok, MQ1} -> MQ1; - error -> queue:new() - end, - SQ1 = dict:store(ChPid, - queue:in({Delivery, EnqueueOnPromotion}, MQ), SQ), + {MQ, PendingCh} = get_sender_queue(ChPid, SQ), + MQ1 = queue:in({Delivery, EnqueueOnPromotion}, MQ), + SQ1 = dict:store(ChPid, {MQ1, PendingCh}, SQ), State1 #state { sender_queues = SQ1 }; {ok, {confirmed, ChPid}} -> %% BQ has confirmed it but we didn't know what the %% msg_seq_no was at the time. We do now! ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - State1 #state { msg_id_status = dict:erase(MsgId, MS) }; + SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), + State1 #state { sender_queues = SQ1, + msg_id_status = dict:erase(MsgId, MS) }; {ok, {published, ChPid}} -> %% It was published to the BQ and we didn't know the %% msg_seq_no so couldn't confirm it at the time. 
case needs_confirming(Delivery, State1) of never -> - State1 #state { msg_id_status = dict:erase(MsgId, MS) }; + SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), + State1 #state { msg_id_status = dict:erase(MsgId, MS), + sender_queues = SQ1 }; eventually -> State1 #state { msg_id_status = dict:store(MsgId, {published, ChPid, MsgSeqNo}, MS) }; immediately -> ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - State1 #state { msg_id_status = dict:erase(MsgId, MS) } + SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), + State1 #state { msg_id_status = dict:erase(MsgId, MS), + sender_queues = SQ1 } end; {ok, discarded} -> %% We've already heard from GM that the msg is to be %% discarded. We won't see this again. - State1 #state { msg_id_status = dict:erase(MsgId, MS) } + SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), + State1 #state { msg_id_status = dict:erase(MsgId, MS), + sender_queues = SQ1 } end; maybe_enqueue_message(_Delivery, _EnqueueOnPromotion, State) -> %% We don't support txns in mirror queues. State. +get_sender_queue(ChPid, SQ) -> + case dict:find(ChPid, SQ) of + error -> {queue:new(), sets:new()}; + {ok, Val} -> Val + end. + +remove_from_pending_ch(MsgId, ChPid, SQ) -> + case dict:find(ChPid, SQ) of + error -> + SQ; + {ok, {MQ, PendingCh}} -> + dict:store(ChPid, {MQ, sets:del_element(MsgId, PendingCh)}, SQ) + end. + process_instruction( {publish, Deliver, ChPid, MsgProps, Msg = #basic_message { id = MsgId }}, State = #state { sender_queues = SQ, @@ -667,46 +685,39 @@ process_instruction( %% that we've seen the msg_id confirmed until we can associate it %% with a msg_seq_no. State1 = ensure_monitoring(ChPid, State), - MS1 = dict:store(MsgId, {published, ChPid}, MS), - {SQ1, MS2} = - case dict:find(ChPid, SQ) of - error -> - {SQ, MS1}; - {ok, MQ} -> - case queue:out(MQ) of - {empty, _MQ} -> - {SQ, MS1}; - {{value, {Delivery = #delivery { - msg_seq_no = MsgSeqNo, - message = #basic_message { id = MsgId } }, - _EnqueueOnPromotion}}, MQ1} -> - %% We received the msg from the channel - %% first. Thus we need to deal with confirms - %% here. - {dict:store(ChPid, MQ1, SQ), - case needs_confirming(Delivery, State1) of - never -> - MS; - eventually -> - dict:store( - MsgId, {published, ChPid, MsgSeqNo}, MS); - immediately -> - ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), - MS - end}; - {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ1} -> - %% The instruction was sent to us before we - %% were within the mirror_pids within the - %% #amqqueue{} record. We'll never receive the - %% message directly from the channel. And the - %% channel will not be expecting any confirms - %% from us. - {SQ, MS} - end + {MQ, PendingCh} = get_sender_queue(ChPid, SQ), + {MQ1, PendingCh1, MS1} = + case queue:out(MQ) of + {empty, _MQ2} -> + {MQ, sets:add_element(MsgId, PendingCh), + dict:store(MsgId, {published, ChPid}, MS)}; + {{value, {Delivery = #delivery { + msg_seq_no = MsgSeqNo, + message = #basic_message { id = MsgId } }, + _EnqueueOnPromotion}}, MQ2} -> + %% We received the msg from the channel first. Thus we + %% need to deal with confirms here. + case needs_confirming(Delivery, State1) of + never -> + {MQ2, PendingCh, MS}; + eventually -> + {MQ2, sets:add_element(MsgId, PendingCh), + dict:store(MsgId, {published, ChPid, MsgSeqNo}, MS)}; + immediately -> + ok = rabbit_channel:confirm(ChPid, [MsgSeqNo]), + {MQ2, PendingCh, MS} + end; + {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ2} -> + %% The instruction was sent to us before we were + %% within the mirror_pids within the #amqqueue{} + %% record. 
We'll never receive the message directly + %% from the channel. And the channel will not be + %% expecting any confirms from us. + {MQ, PendingCh, MS} end, - State2 = State1 #state { sender_queues = SQ1, - msg_id_status = MS2 }, + SQ1 = dict:store(ChPid, {MQ1, PendingCh1}, SQ), + State2 = State1 #state { sender_queues = SQ1, msg_id_status = MS1 }, {ok, case Deliver of @@ -727,33 +738,28 @@ process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, %% Many of the comments around the publish head above apply here %% too. State1 = ensure_monitoring(ChPid, State), - MS1 = dict:store(MsgId, discarded, MS), - {SQ1, MS2} = - case dict:find(ChPid, SQ) of - error -> - {SQ, MS1}; - {ok, MQ} -> - case queue:out(MQ) of - {empty, _MQ} -> - {SQ, MS1}; - {{value, {#delivery { - message = #basic_message { id = MsgId } }, - _EnqueueOnPromotion}}, MQ1} -> - %% We've already seen it from the channel, - %% we're not going to see this again, so don't - %% add it to MS - {dict:store(ChPid, MQ1, SQ), MS}; - {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ1} -> - %% The instruction was sent to us before we - %% were within the mirror_pids within the - %% #amqqueue{} record. We'll never receive the - %% message directly from the channel. - {SQ, MS} - end + {MQ, PendingCh} = get_sender_queue(ChPid, SQ), + {MQ1, PendingCh1, MS1} = + case queue:out(MQ) of + {empty, _MQ} -> + {MQ, sets:add_element(MsgId, PendingCh), + dict:store(MsgId, discarded, MS)}; + {{value, {#delivery { message = #basic_message { id = MsgId } }, + _EnqueueOnPromotion}}, MQ2} -> + %% We've already seen it from the channel, we're not + %% going to see this again, so don't add it to MS + {MQ2, PendingCh, MS}; + {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ2} -> + %% The instruction was sent to us before we were + %% within the mirror_pids within the #amqqueue{} + %% record. We'll never receive the message directly + %% from the channel. 
+ {MQ, PendingCh, MS} end, + SQ1 = dict:store(ChPid, {MQ1, PendingCh1}, SQ), BQS1 = BQ:discard(Msg, ChPid, BQS), {ok, State1 #state { sender_queues = SQ1, - msg_id_status = MS2, + msg_id_status = MS1, backing_queue_state = BQS1 }}; process_instruction({set_length, Length}, State = #state { backing_queue = BQ, @@ -815,15 +821,23 @@ process_instruction({requeue, MsgPropsFun, MsgIds}, end}; process_instruction({sender_death, ChPid}, State = #state { sender_queues = SQ, + msg_id_status = MS, known_senders = KS }) -> {ok, case dict:find(ChPid, KS) of error -> State; {ok, MRef} -> true = erlang:demonitor(MRef), - KS1 = dict:erase(ChPid, KS), - SQ1 = dict:erase(ChPid, SQ), - State #state { sender_queues = SQ1, known_senders = KS1 } + MS1 = case dict:find(ChPid, SQ) of + error -> + MS; + {ok, {_MQ, PendingCh}} -> + lists:foldl(fun dict:erase/2, MS, + sets:to_list(PendingCh)) + end, + State #state { sender_queues = dict:erase(ChPid, SQ), + msg_id_status = MS1, + known_senders = dict:erase(ChPid, KS) } end}; process_instruction(delete_and_terminate, State = #state { backing_queue = BQ, -- cgit v1.2.1 From cbb2e20ff65999293be4bcbc08bdee588a731435 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 16:48:34 +0100 Subject: Update comment --- src/rabbit_mirror_queue_slave.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index b6aaecb7..c7ff4480 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -337,7 +337,7 @@ bq_init(BQ, Q, Recover) -> run_backing_queue(rabbit_mirror_queue_master, Fun, State) -> %% Yes, this might look a little crazy, but see comments in - %% local_sender_death/2 + %% confirm_sender_death/1 Fun(?MODULE, State); run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> -- cgit v1.2.1 From d99801020fa148777d1a3586eab0ea32e9119ab4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 23 May 2011 16:50:48 +0100 Subject: Add pointer back to bug given the value of the additional commentary in the bug --- src/rabbit_mirror_queue_coordinator.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 96d0e15b..ee849088 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -304,6 +304,8 @@ %% the last seen state of the queue: checking length alone is not %% sufficient in this case. %% +%% For more documentation see the comments in bug 23554. +%% %%---------------------------------------------------------------------------- start_link(Queue, GM, DeathFun) -> -- cgit v1.2.1 From b4963dd7aae39e8f8c5306b9d39af6deb7623e63 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 24 May 2011 12:50:58 +0100 Subject: Whitespace --- src/rabbit_mirror_queue_master.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 1d2b1676..99de1b18 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -254,32 +254,32 @@ requeue(AckTags, MsgPropsFun, State = #state { gm = GM, ok = gm:broadcast(GM, {requeue, MsgPropsFun, MsgIds}), {MsgIds, State #state { backing_queue_state = BQS1 }}. -len(#state { backing_queue = BQ, backing_queue_state = BQS}) -> +len(#state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:len(BQS). 
-is_empty(#state { backing_queue = BQ, backing_queue_state = BQS}) -> +is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:is_empty(BQS). set_ram_duration_target(Target, State = #state { backing_queue = BQ, - backing_queue_state = BQS}) -> + backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:set_ram_duration_target(Target, BQS) }. -ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> +ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> {Result, BQS1} = BQ:ram_duration(BQS), {Result, State #state { backing_queue_state = BQS1 }}. -needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS}) -> +needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:needs_timeout(BQS). -timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS}) -> +timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:timeout(BQS) }. handle_pre_hibernate(State = #state { backing_queue = BQ, - backing_queue_state = BQS}) -> + backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }. -status(#state { backing_queue = BQ, backing_queue_state = BQS}) -> +status(#state { backing_queue = BQ, backing_queue_state = BQS }) -> BQ:status(BQS). invoke(?MODULE, Fun, State) -> -- cgit v1.2.1 From f0cb7e165ced609d9909b3ef9b528442f63dc658 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 24 May 2011 17:38:14 +0100 Subject: Permit dropping nodes of mirrored queues. This turns out to be much much messier than I'd hoped as the principle problem becomes ensuring an add after a drop works. Normally, an add would only occur on a node that has not seen said queue before: if it had, in a previous lifetime, then the booting of rabbit would have ripped out any locally stored files regarding that queue. But now this step may be missed. Having tried many different approaches, the simplest became expanding bq so that the shutdown reason is exposed to the BQ. Thus both slave and master can then detect that they're being dropped, and, in the case of master, it can convert a bq:terminate to a bq:delete_and_terminate. Every other approach I could think of turned out worse. --- docs/rabbitmqctl.1.xml | 43 ++++++++++++++++++ include/rabbit_backing_queue_spec.hrl | 4 +- src/rabbit_amqqueue_process.erl | 12 ++--- src/rabbit_backing_queue.erl | 4 +- src/rabbit_control.erl | 6 +++ src/rabbit_mirror_queue_master.erl | 27 ++++++++---- src/rabbit_mirror_queue_misc.erl | 83 ++++++++++++++++++++++------------- src/rabbit_mirror_queue_slave.erl | 11 ++++- src/rabbit_mirror_queue_slave_sup.erl | 2 +- src/rabbit_tests.erl | 8 ++-- src/rabbit_variable_queue.erl | 6 +-- 11 files changed, 147 insertions(+), 59 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 62869158..908ca973 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1325,6 +1325,49 @@ + + + Mirrored Queue Management + + Mirrored queues can have slaves dynamically added, and slaves + or the master dynamically dropped. Refer to the High Availability + guide for further details about mirrored queues in + general. + + + + + add_queue_mirror queue_name node + + + Attempts to add a mirror of the queue + queue_name on + node. This will only succeed if the + queue was declared a mirrored queue and if there is no + mirror of the queue already on the node. 
If it succeeds, + the new mirror will start off as an empty slave. + + + + + + drop_queue_mirror queue_name node + + + Attempts to drop a mirror of the queue + queue_name on + node. This will only succeed if the + queue was declared a mirrored queue and if there is a + mirror of the queue already on the node. If the node + contains the master of the queue, a slave on some other + node will be promoted to become the new master. It is + not permitted to drop the only node of a mirrored-queue. + + + + + diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 1c2b94e2..295d9039 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -32,8 +32,8 @@ -spec(stop/0 :: () -> 'ok'). -spec(init/4 :: (rabbit_types:amqqueue(), attempt_recovery(), async_callback(), sync_callback()) -> state()). --spec(terminate/1 :: (state()) -> state()). --spec(delete_and_terminate/1 :: (state()) -> state()). +-spec(terminate/2 :: (any(), state()) -> state()). +-spec(delete_and_terminate/2 :: (any(), state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). -spec(publish/4 :: (rabbit_types:basic_message(), rabbit_types:message_properties(), pid(), state()) -> diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index ea31ec13..b1c95338 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -145,16 +145,16 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS, fun (Delivery, StateN) -> deliver_or_enqueue(Delivery, StateN) end, State, Deliveries). -terminate(shutdown, State = #q{backing_queue = BQ}) -> - terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); -terminate({shutdown, _}, State = #q{backing_queue = BQ}) -> - terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); -terminate(_Reason, State = #q{backing_queue = BQ}) -> +terminate(shutdown = R, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State); +terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State); +terminate(Reason, State = #q{backing_queue = BQ}) -> %% FIXME: How do we cancel active subscriptions? terminate_shutdown(fun (BQS) -> rabbit_event:notify( queue_deleted, [{pid, self()}]), - BQS1 = BQ:delete_and_terminate(BQS), + BQS1 = BQ:delete_and_terminate(Reason, BQS), %% don't care if the internal delete %% doesn't return 'ok'. rabbit_amqqueue:internal_delete(qname(State)), diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index addaabc5..217ad3eb 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -49,11 +49,11 @@ behaviour_info(callbacks) -> {init, 4}, %% Called on queue shutdown when queue isn't being deleted. - {terminate, 1}, + {terminate, 2}, %% Called when the queue is terminating and needs to delete all %% its content. - {delete_and_terminate, 1}, + {delete_and_terminate, 2}, %% Remove all messages in the queue, but not messages which have %% been fetched and are pending acks. 
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 1140a2f0..b4b6255e 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -244,6 +244,12 @@ action(add_queue_mirror, Node, [Queue, MirrorNode], Opts, Inform) -> rpc_call(Node, rabbit_mirror_queue_misc, add_slave, [VHostArg, list_to_binary(Queue), list_to_atom(MirrorNode)]); +action(drop_queue_mirror, Node, [Queue, MirrorNode], Opts, Inform) -> + Inform("Dropping mirror of queue ~p on node ~p~n", [Queue, MirrorNode]), + VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), + rpc_call(Node, rabbit_mirror_queue_misc, drop_slave, + [VHostArg, list_to_binary(Queue), list_to_atom(MirrorNode)]); + action(list_exchanges, Node, Args, Opts, Inform) -> Inform("Listing exchanges", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 99de1b18..9bd8565f 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -16,7 +16,7 @@ -module(rabbit_mirror_queue_master). --export([init/4, terminate/1, delete_and_terminate/1, +-export([init/4, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, @@ -106,17 +106,28 @@ promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, KS) -> ack_msg_id = dict:new(), known_senders = sets:from_list(KS) }. -terminate(State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> +terminate({shutdown, dropped} = Reason, + State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> + %% Backing queue termination - this node has been explicitly + %% dropped. Normally, non-durable queues would be tidied up on + %% startup, but there's a possibility that we will be added back + %% in without this node being restarted. Thus we must do the full + %% blown delete_and_terminate now, but only locally: we do not + %% broadcast delete_and_terminate. + State #state { backing_queue_state = BQ:delete_and_terminate(Reason, BQS), + set_delivered = 0 }; +terminate(Reason, + State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination. The queue is going down but %% shouldn't be deleted. Most likely safe shutdown of this %% node. Thus just let some other slave take over. - State #state { backing_queue_state = BQ:terminate(BQS) }. + State #state { backing_queue_state = BQ:terminate(Reason, BQS) }. -delete_and_terminate(State = #state { gm = GM, - backing_queue = BQ, - backing_queue_state = BQS }) -> - ok = gm:broadcast(GM, delete_and_terminate), - State #state { backing_queue_state = BQ:delete_and_terminate(BQS), +delete_and_terminate(Reason, State = #state { gm = GM, + backing_queue = BQ, + backing_queue_state = BQS }) -> + ok = gm:broadcast(GM, {delete_and_terminate, Reason}), + State #state { backing_queue_state = BQ:delete_and_terminate(Reason, BQS), set_delivered = 0 }. purge(State = #state { gm = GM, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 5f180c5e..046d3380 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -16,7 +16,8 @@ -module(rabbit_mirror_queue_misc). --export([remove_from_queue/2, add_slave/2, add_slave/3, on_node_up/0]). +-export([remove_from_queue/2, on_node_up/0, + drop_slave/2, drop_slave/3, add_slave/2, add_slave/3]). -include("rabbit.hrl"). 
@@ -59,36 +60,6 @@ remove_from_queue(QueueName, DeadPids) -> end end). -add_slave(VHostPath, QueueName, MirrorNode) -> - add_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). - -add_slave(Queue, MirrorNode) -> - rabbit_amqqueue:with( - Queue, - fun (#amqqueue { arguments = Args, name = Name, - pid = QPid, mirror_pids = MPids } = Q) -> - case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of - undefined -> - ok; - _ -> - case [MirrorNode || Pid <- [QPid | MPids], - node(Pid) =:= MirrorNode] of - [] -> - Result = - rabbit_mirror_queue_slave_sup:start_child( - MirrorNode, [Q]), - rabbit_log:info("Adding slave node for ~s: ~p~n", - [rabbit_misc:rs(Name), Result]), - case Result of - {ok, _Pid} -> ok; - _ -> Result - end; - [_] -> - {error, queue_already_mirrored_on_node} - end - end - end). - on_node_up() -> Qs = rabbit_misc:execute_mnesia_transaction( @@ -113,3 +84,53 @@ on_node_up() -> end), [add_slave(Q, node()) || Q <- Qs], ok. + +drop_slave(VHostPath, QueueName, MirrorNode) -> + drop_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). + +drop_slave(Queue, MirrorNode) -> + if_mirrored_queue( + Queue, + fun (#amqqueue { name = Name, pid = QPid, mirror_pids = MPids }) -> + case [Pid || Pid <- [QPid | MPids], node(Pid) =:= MirrorNode] of + [] -> + {error, {queue_not_mirrored_on_node, MirrorNode}}; + [QPid | MPids] -> + {error, cannot_drop_only_mirror}; + [Pid] -> + rabbit_log:info("Dropping slave node on ~p for ~s~n", + [MirrorNode, rabbit_misc:rs(Name)]), + exit(Pid, {shutdown, dropped}), + ok + end + end). + +add_slave(VHostPath, QueueName, MirrorNode) -> + add_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). + +add_slave(Queue, MirrorNode) -> + if_mirrored_queue( + Queue, + fun (#amqqueue { name = Name, pid = QPid, mirror_pids = MPids } = Q) -> + case [Pid || Pid <- [QPid | MPids], node(Pid) =:= MirrorNode] of + [] -> Result = rabbit_mirror_queue_slave_sup:start_child( + MirrorNode, [Q]), + rabbit_log:info( + "Adding slave node for ~s on node ~p: ~p~n", + [rabbit_misc:rs(Name), MirrorNode, Result]), + case Result of + {ok, _Pid} -> ok; + _ -> Result + end; + [_] -> {error, {queue_already_mirrored_on_node, MirrorNode}} + end + end). + +if_mirrored_queue(Queue, Fun) -> + rabbit_amqqueue:with( + Queue, fun (#amqqueue { arguments = Args } = Q) -> + case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of + undefined -> ok; + _ -> Fun(Q) + end + end). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index c7ff4480..666687a5 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -226,6 +226,9 @@ handle_info({'DOWN', _MonitorRef, process, MPid, _Reason}, handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) -> noreply(local_sender_death(ChPid, State)); +handle_info({'EXIT', _Pid, Reason}, State) -> + {stop, Reason, State}; + handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}. @@ -238,6 +241,10 @@ terminate(_Reason, #state { backing_queue_state = undefined }) -> %% We've received a delete_and_terminate from gm, thus nothing to %% do here. 
ok; +terminate({shutdown, dropped} = R, #state { backing_queue = BQ, + backing_queue_state = BQS }) -> + %% See rabbit_mirror_queue_master:terminate/2 + BQ:delete_and_terminate(R, BQS); terminate(Reason, #state { q = Q, gm = GM, backing_queue = BQ, @@ -839,10 +846,10 @@ process_instruction({sender_death, ChPid}, msg_id_status = MS1, known_senders = dict:erase(ChPid, KS) } end}; -process_instruction(delete_and_terminate, +process_instruction({delete_and_terminate, Reason}, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> - BQ:delete_and_terminate(BQS), + BQ:delete_and_terminate(Reason, BQS), {stop, State #state { backing_queue_state = undefined }}. msg_ids_to_acktags(MsgIds, MA) -> diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 25ee1fd0..2ce5941e 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -40,7 +40,7 @@ start() -> {ok, _} = - supervisor:start_child( + supervisor2:start_child( rabbit_sup, {rabbit_mirror_queue_slave_sup, {rabbit_mirror_queue_slave_sup, start_link, []}, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 1a37cdff..3f4aa54e 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2116,7 +2116,7 @@ with_fresh_variable_queue(Fun) -> {delta, {delta, undefined, 0, undefined}}, {q3, 0}, {q4, 0}, {len, 0}]), - _ = rabbit_variable_queue:delete_and_terminate(Fun(VQ)), + _ = rabbit_variable_queue:delete_and_terminate(shutdown, Fun(VQ)), passed. test_variable_queue() -> @@ -2284,7 +2284,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> Count + Count, VQ3), {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), - _VQ6 = rabbit_variable_queue:terminate(VQ5), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), VQ7 = variable_queue_init(test_amqqueue(true), true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), @@ -2301,7 +2301,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> {_Guids, VQ4} = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:timeout(VQ4), - _VQ6 = rabbit_variable_queue:terminate(VQ5), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), VQ7 = variable_queue_init(test_amqqueue(true), true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. @@ -2336,7 +2336,7 @@ test_queue_recover() -> VQ1 = variable_queue_init(Q, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), - _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), + _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2), rabbit_amqqueue:internal_delete(QName) end), passed. diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 8ac3ad43..a167cca0 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,7 +16,7 @@ -module(rabbit_variable_queue). --export([init/4, terminate/1, delete_and_terminate/1, +-export([init/4, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, drain_confirmed/1, fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, @@ -452,7 +452,7 @@ init(#amqqueue { name = QueueName, durable = true }, true, init(true, IndexState, DeltaCount, Terms1, AsyncCallback, SyncCallback, PersistentClient, TransientClient). 
-terminate(State) -> +terminate(_Reason, State) -> State1 = #vqstate { persistent_count = PCount, index_state = IndexState, msg_store_clients = {MSCStateP, MSCStateT} } = @@ -473,7 +473,7 @@ terminate(State) -> %% the only difference between purge and delete is that delete also %% needs to delete everything that's been delivered and not ack'd. -delete_and_terminate(State) -> +delete_and_terminate(_Reason, State) -> %% TODO: there is no need to interact with qi at all - which we do %% as part of 'purge' and 'remove_pending_ack', other than %% deleting it. -- cgit v1.2.1 From 0e0bcf22aad825811ccb7b633bfcfa134dde3d0e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 12:05:08 +0100 Subject: Enforce checking of x-mirror arg --- src/rabbit_amqqueue.erl | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 8c374ef3..50f5a9da 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -312,7 +312,8 @@ check_declare_arguments(QueueName, Args) -> [Key, rabbit_misc:rs(QueueName), Error]) end || {Key, Fun} <- [{<<"x-expires">>, fun check_integer_argument/1}, - {<<"x-message-ttl">>, fun check_integer_argument/1}]], + {<<"x-message-ttl">>, fun check_integer_argument/1}, + {<<"x-mirror">>, fun check_array_of_longstr_argument/1}]], ok. check_integer_argument(undefined) -> @@ -325,6 +326,18 @@ check_integer_argument({Type, Val}) when Val > 0 -> check_integer_argument({_Type, Val}) -> {error, {value_zero_or_less, Val}}. +check_array_of_longstr_argument(undefined) -> + ok; +check_array_of_longstr_argument({array, Array}) -> + case lists:all(fun ({longstr, _NodeName}) -> true; + (_) -> false + end, Array) of + true -> ok; + false -> {error, {array_contains_non_longstrs, Array}} + end; +check_array_of_longstr_argument({Type, _Val}) -> + {error, {unacceptable_type, Type}}. + list(VHostPath) -> mnesia:dirty_match_object( rabbit_queue, -- cgit v1.2.1 From ed73a0d3c85aa3bd0fe7226f4c3c7de0f1452f02 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 12:35:21 +0100 Subject: Not quite sure how I managed to get that quite so wrong... 
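The previous revision slept for 25ms up front on every with/3 call that
touched a mirrored queue, before the operation had even been attempted; the
pause appears intended only as a back-off before retrying once the queue
process has gone away. The fix (shown in full in the diff below) moves the
sleep inside the retry closure, so the common successful path pays no delay:

    E1 = fun () -> timer:sleep(25), with(Name, F, E) end,
    rabbit_misc:with_exit_handler(E1, fun () -> F(Q) end);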
--- src/rabbit_amqqueue.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 50f5a9da..268199e5 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -259,8 +259,7 @@ with(Name, F, E) -> {ok, Q = #amqqueue{mirror_pids = []}} -> rabbit_misc:with_exit_handler(E, fun () -> F(Q) end); {ok, Q} -> - timer:sleep(25), - E1 = fun () -> with(Name, F, E) end, + E1 = fun () -> timer:sleep(25), with(Name, F, E) end, rabbit_misc:with_exit_handler(E1, fun () -> F(Q) end); {error, not_found} -> E() -- cgit v1.2.1 From 6a3be4ce63e9e92f47e8299d4282ab111818315a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 13:23:21 +0100 Subject: correct use of nodes() --- src/rabbit_mirror_queue_master.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 9bd8565f..da12ea82 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -77,11 +77,11 @@ init(#amqqueue { arguments = Args, name = QName } = Q, Recover, Q, undefined, sender_death_fun()), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), - Nodes1 = case Nodes of - [] -> nodes(); - _ -> [list_to_atom(binary_to_list(Node)) || - {longstr, Node} <- Nodes] - end, + Nodes1 = (case Nodes of + [] -> rabbit_mnesia:all_clustered_nodes(); + _ -> [list_to_atom(binary_to_list(Node)) || + {longstr, Node} <- Nodes] + end) -- [node()], [rabbit_mirror_queue_misc:add_slave(QName, Node) || Node <- Nodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover, AsyncCallback, SyncCallback), -- cgit v1.2.1 From 36f68e916ff7319e027b7545d987ecd920284324 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 13:32:06 +0100 Subject: enforce equivalence checking of x-mirror arg --- src/rabbit_amqqueue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 268199e5..f9e84443 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -300,7 +300,7 @@ with_exclusive_access_or_die(Name, ReaderPid, F) -> assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, RequiredArgs) -> rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName, - [<<"x-expires">>]). + [<<"x-expires">>, <<"x-mirror">>]). 
check_declare_arguments(QueueName, Args) -> [case Fun(rabbit_misc:table_lookup(Args, Key)) of -- cgit v1.2.1 From 6a8b341e4c4bd6a7f3c08f005416defc20077b91 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 13:42:34 +0100 Subject: Work in gm table creation as part of the normal upgrade steps, and then assume that it'll continue to exist --- src/rabbit_mirror_queue_coordinator.erl | 1 - src/rabbit_mirror_queue_slave.erl | 1 - src/rabbit_upgrade_functions.erl | 22 ++++++++++++++-------- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index ee849088..2727c1d0 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -324,7 +324,6 @@ ensure_monitoring(CPid, Pids) -> init([#amqqueue { name = QueueName } = Q, GM, DeathFun]) -> GM1 = case GM of undefined -> - ok = gm:create_tables(), {ok, GM2} = gm:start_link(QueueName, ?MODULE, [self()]), receive {joined, GM2, _Members} -> ok diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 666687a5..678926af 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -75,7 +75,6 @@ set_maximum_since_use(QPid, Age) -> init([#amqqueue { name = QueueName } = Q]) -> process_flag(trap_exit, true), %% amqqueue_process traps exits too. - ok = gm:create_tables(), {ok, GM} = gm:start_link(QueueName, ?MODULE, [self()]), receive {joined, GM} -> ok diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index a6f02a0e..04744aa4 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -30,21 +30,23 @@ -rabbit_upgrade({exchange_event_serial, mnesia, []}). -rabbit_upgrade({trace_exchanges, mnesia, []}). -rabbit_upgrade({mirror_pids, mnesia, []}). +-rabbit_upgrade({gm, mnesia, []}). %% ------------------------------------------------------------------- -ifdef(use_specs). --spec(remove_user_scope/0 :: () -> 'ok'). --spec(hash_passwords/0 :: () -> 'ok'). --spec(add_ip_to_listener/0 :: () -> 'ok'). --spec(internal_exchanges/0 :: () -> 'ok'). +-spec(remove_user_scope/0 :: () -> 'ok'). +-spec(hash_passwords/0 :: () -> 'ok'). +-spec(add_ip_to_listener/0 :: () -> 'ok'). +-spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). --spec(topic_trie/0 :: () -> 'ok'). +-spec(topic_trie/0 :: () -> 'ok'). -spec(exchange_event_serial/0 :: () -> 'ok'). --spec(semi_durable_route/0 :: () -> 'ok'). --spec(trace_exchanges/0 :: () -> 'ok'). --spec(mirror_pids/0 :: () -> 'ok'). +-spec(semi_durable_route/0 :: () -> 'ok'). +-spec(trace_exchanges/0 :: () -> 'ok'). +-spec(mirror_pids/0 :: () -> 'ok'). +-spec(gm/0 :: () -> 'ok'). -endif. @@ -136,6 +138,10 @@ mirror_pids() -> || T <- Tables ], ok. +gm() -> + create(gm_group, [{record_name, gm_group}, + {attributes, [name, version, members]}]). 
+ %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 82e0c98e9752fb06328ff2d59b60b890cb1716a0 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 13:50:18 +0100 Subject: Assert equivalence of x-message-ttl --- src/rabbit_amqqueue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e58e67ad..d029ff1d 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -292,7 +292,7 @@ with_exclusive_access_or_die(Name, ReaderPid, F) -> assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, RequiredArgs) -> rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName, - [<<"x-expires">>]). + [<<"x-expires">>, <<"x-message-ttl">>]). check_declare_arguments(QueueName, Args) -> [case Fun(rabbit_misc:table_lookup(Args, Key)) of -- cgit v1.2.1 From 3fc13012637011e0565b2eb7e782d209512a5906 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 15:45:21 +0100 Subject: R14B03 moved hostname and ip_port out of inet.hrl and into inet.erl where they export_type'd them. This is a backwards incompatible change. Thus there's little alternative to bumping the erts requirement for using specs --- Makefile | 4 ++-- src/rabbit_networking.erl | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index cdb86aad..e376b4ac 100644 --- a/Makefile +++ b/Makefile @@ -41,8 +41,8 @@ RABBIT_PLT=rabbit.plt ifndef USE_SPECS # our type specs rely on features and bug fixes in dialyzer that are -# only available in R14A upwards (R14A is erts 5.8) -USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,8]), halt().') +# only available in R14B03 upwards (R14B03 is erts 5.8.4) +USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,8,4]), halt().') endif #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 53be0190..451e56e8 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -43,6 +43,9 @@ -export_type([ip_port/0, hostname/0]). +-type(hostname() :: inet:hostname()). +-type(ip_port() :: inet:ip_port()). + -type(family() :: atom()). -type(listener_config() :: ip_port() | {hostname(), ip_port()} | -- cgit v1.2.1 From cf7d7556ceb76ee2bdaa4a31cdd3bef129bac920 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 25 May 2011 17:01:39 +0100 Subject: Remove R13ism --- src/rabbit_mirror_queue_slave.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 678926af..c5f83c24 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -386,9 +386,8 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) -> Acc end end, {MS, gb_trees:empty()}, MsgIds), - gb_trees:map(fun (ChPid, MsgSeqNos) -> - ok = rabbit_channel:confirm(ChPid, MsgSeqNos) - end, CMs), + [ok = rabbit_channel:confirm(ChPid, MsgSeqNos) + || {ChPid, MsgSeqNos} <- gb_trees:to_list(CMs)], State #state { msg_id_status = MS1 }. 
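%% Aside (editor's sketch, not part of the patch above): the confirm_messages
%% change replaces a side-effecting gb_trees:map/2 with a comprehension over
%% gb_trees:to_list/1. The same iteration pattern, independent of the
%% surrounding channel state; each_entry/2 is a hypothetical name.
each_entry(Fun, Tree) ->
    lists:foreach(fun ({Key, Value}) -> Fun(Key, Value) end,
                  gb_trees:to_list(Tree)).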
gb_trees_cons(Key, Value, Tree) -> -- cgit v1.2.1 From 5dee225836413562c0717932acfee8b9348048c8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 26 May 2011 17:40:26 +0100 Subject: What on earth were those namespaces imported for? They're not used for anything, and they help the build to fail if we can't contact docbook.org. --- docs/usage.xsl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/usage.xsl b/docs/usage.xsl index a6cebd93..586f8303 100644 --- a/docs/usage.xsl +++ b/docs/usage.xsl @@ -1,9 +1,5 @@ -- cgit v1.2.1 From ba598037cc46d056276a19c6b5d8acf3c103e790 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 26 May 2011 18:04:00 +0100 Subject: Don't validate against the docbook DTD when building the usage erl - we don't need docbook installed to build the server, but without this --novalid we instead go to the internet if docbook is not installed, which can make building flaky. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e376b4ac..a27d3138 100644 --- a/Makefile +++ b/Makefile @@ -242,7 +242,7 @@ distclean: clean # Do not fold the cp into previous line, it's there to stop the file being # generated but empty if we fail $(SOURCE_DIR)/%_usage.erl: - xsltproc --stringparam modulename "`basename $@ .erl`" \ + xsltproc --novalid --stringparam modulename "`basename $@ .erl`" \ $(DOCS_DIR)/usage.xsl $< > $@.tmp sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $@.tmp > $@.tmp2 fold -s $@.tmp2 > $@.tmp3 -- cgit v1.2.1 From 7e12686d597f02df55f21790b0667ba6e8dc109a Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 26 May 2011 18:07:38 +0100 Subject: rabbitmqctl report --- docs/rabbitmqctl.1.xml | 20 +++++++++++++++++++- src/rabbit.erl | 2 ++ src/rabbit_consumer.erl | 42 ++++++++++++++++++++++++++++++++++++++++++ src/rabbit_control.erl | 33 ++++++++++++++++++++++++--------- src/rabbit_networking.erl | 26 ++++++++++++-------------- src/rabbit_tests.erl | 2 +- 6 files changed, 100 insertions(+), 25 deletions(-) create mode 100644 src/rabbit_consumer.erl diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index ffa01894..d034e02d 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1259,7 +1259,7 @@ - list_consumers + list_consumers-p vhostpath List consumers, i.e. subscriptions to a queue's message @@ -1279,6 +1279,24 @@ + + report + + + Generate a server status report containing a concatenation of all server status + information for support purposes. The output should be redirected to a + file when accompanying a support request. + + + For example: + + rabbitmqctl report > server_report.txt + + This command creates a server report which may be attached to a + support request email. + + + diff --git a/src/rabbit.erl b/src/rabbit.erl index e6e80b4a..7b507ff1 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -217,6 +217,8 @@ stop_and_halt() -> status() -> [{pid, list_to_integer(os:getpid())}, + {os, os:type()}, + {erlang_version, erlang:system_info(system_version)}, {running_applications, application:which_applications()}] ++ rabbit_mnesia:status(). diff --git a/src/rabbit_consumer.erl b/src/rabbit_consumer.erl new file mode 100644 index 00000000..1c9d1064 --- /dev/null +++ b/src/rabbit_consumer.erl @@ -0,0 +1,42 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_consumer). + +-export([info_all/1]). + +-include("rabbit.hrl"). + +%%---------------------------------------------------------------------------- + +-ifdef(use_specs). + +-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). + +-endif. + +%%---------------------------------------------------------------------------- + +info_all(VHostPath) -> + [[{queue_name, QName#resource.name}, + {channel_pid, ChPid}, + {consumer_tag, ConsumerTag}, + {ack_required, AckRequired}] || + #amqqueue{pid=QPid, name=QName} <- rabbit_amqqueue:list(VHostPath), + {ChPid, ConsumerTag, AckRequired} <- + delegate:invoke(QPid, fun (P) -> + gen_server2:call(P, consumers, infinity) + end)]. diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8172f804..8ced9dd6 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -260,7 +260,7 @@ action(list_bindings, Node, Args, Opts, Inform) -> action(list_connections, Node, Args, _Opts, Inform) -> Inform("Listing connections", []), ArgAtoms = default_if_empty(Args, [user, peer_address, peer_port, state]), - display_info_list(rpc_call(Node, rabbit_networking, connection_info_all, + display_info_list(rpc_call(Node, rabbit_networking, info_all, [ArgAtoms]), ArgAtoms); @@ -275,13 +275,8 @@ action(list_consumers, Node, _Args, Opts, Inform) -> Inform("Listing consumers", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), InfoKeys = [queue_name, channel_pid, consumer_tag, ack_required], - case rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]) of - L when is_list(L) -> display_info_list( - [lists:zip(InfoKeys, tuple_to_list(X)) || - X <- L], - InfoKeys); - Other -> Other - end; + display_info_list(rpc_call(Node, rabbit_consumer, info_all, [VHostArg]), + InfoKeys); action(trace_on, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), @@ -309,7 +304,27 @@ action(list_permissions, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), display_list(call(Node, {rabbit_auth_backend_internal, - list_vhost_permissions, [VHost]})). + list_vhost_permissions, [VHost]})); + +action(report, Node, _Args, _Opts, Inform) -> + io:format("Reporting server status on ~p~n", [erlang:universaltime()]), + [action(status, ClusteredNode, [], [], Inform) || + ClusteredNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], + Report = fun (Module, VHostArg) -> + io:format("%% ~p~n", [[Module] ++ VHostArg]), + case Results = rpc_call(Node, Module, info_all, VHostArg) of + [Row|_] -> {InfoItems,_} = lists:unzip(Row), + display_info_list(Results, InfoItems); + _ -> ok + end + end, + GlobalQueries = [rabbit_networking, rabbit_channel], + VHostQueries = [rabbit_amqqueue, rabbit_exchange, rabbit_binding, + rabbit_consumer], + [Report(M, []) || M <- GlobalQueries], + [Report(M, [V]) || V <- rpc_call(Node, rabbit_vhost, list, []), + M <- VHostQueries], + ok. 
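%% Aside (editor's sketch, not part of the patch above): the report action runs
%% node-wide queries once and per-vhost queries once for every vhost. The
%% fan-out reduced to its core; run_report/4 and the RunQuery fun are
%% hypothetical, whereas in the patch the queries are module names handed to
%% its rpc_call/4 helper.
run_report(RunQuery, GlobalQueries, VHostQueries, VHosts) ->
    [RunQuery(Q, []) || Q <- GlobalQueries],
    [RunQuery(Q, [V]) || V <- VHosts, Q <- VHostQueries],
    ok.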
%%---------------------------------------------------------------------------- diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 451e56e8..72442aaa 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -18,10 +18,8 @@ -export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2, stop_tcp_listener/1, on_node_down/1, active_listeners/0, - node_listeners/1, connections/0, connection_info_keys/0, - connection_info/1, connection_info/2, - connection_info_all/0, connection_info_all/1, - close_connection/2]). + node_listeners/1, connections/0, info_keys/0, info/1, info/2, + info_all/0, info_all/1, close_connection/2]). %%used by TCP-based transports, e.g. STOMP adapter -export([check_tcp_listener_address/2, @@ -59,14 +57,14 @@ -spec(active_listeners/0 :: () -> [rabbit_types:listener()]). -spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]). -spec(connections/0 :: () -> [rabbit_types:connection()]). --spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()). --spec(connection_info/1 :: +-spec(info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(info/1 :: (rabbit_types:connection()) -> rabbit_types:infos()). --spec(connection_info/2 :: +-spec(info/2 :: (rabbit_types:connection(), rabbit_types:info_keys()) -> rabbit_types:infos()). --spec(connection_info_all/0 :: () -> [rabbit_types:infos()]). --spec(connection_info_all/1 :: +-spec(info_all/0 :: () -> [rabbit_types:infos()]). +-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). -spec(close_connection/2 :: (pid(), string()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). @@ -275,13 +273,13 @@ connections() -> {_, ConnSup, supervisor, _} <- supervisor:which_children({rabbit_tcp_client_sup, Node})]. -connection_info_keys() -> rabbit_reader:info_keys(). +info_keys() -> rabbit_reader:info_keys(). -connection_info(Pid) -> rabbit_reader:info(Pid). -connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items). +info(Pid) -> rabbit_reader:info(Pid). +info(Pid, Items) -> rabbit_reader:info(Pid, Items). -connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end). -connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end). +info_all() -> cmap(fun (Q) -> info(Q) end). +info_all(Items) -> cmap(fun (Q) -> info(Q, Items) end). 
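%% Aside (editor's sketch, not part of the patch above): the rabbitmqctl
%% actions in these commits all reach the broker through an rpc_call/4 helper
%% whose body is not shown in the hunks; given the -define(RPC_TIMEOUT,
%% infinity) visible in the rabbit_control hunks, it presumably wraps
%% rpc:call/5 roughly like this.
rpc_call(Node, Mod, Fun, Args) ->
    rpc:call(Node, Mod, Fun, Args, infinity).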
close_connection(Pid, Explanation) -> case lists:member(Pid, connections()) of diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 1a37cdff..dce94c56 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1162,7 +1162,7 @@ test_server_status() -> {ok, _C} = gen_tcp:connect(H, P, []), timer:sleep(100), ok = info_action(list_connections, - rabbit_networking:connection_info_keys(), false), + rabbit_networking:info_keys(), false), %% close_connection [ConnPid] = rabbit_networking:connections(), ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid), -- cgit v1.2.1 From 8e67acc47eb955b5222334280bd153936cec94ba Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 27 May 2011 09:30:50 +0100 Subject: Rollback info_all renaming --- src/rabbit_control.erl | 16 ++++++++++------ src/rabbit_networking.erl | 26 ++++++++++++++------------ src/rabbit_tests.erl | 2 +- 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8ced9dd6..ef5fd420 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -260,7 +260,7 @@ action(list_bindings, Node, Args, Opts, Inform) -> action(list_connections, Node, Args, _Opts, Inform) -> Inform("Listing connections", []), ArgAtoms = default_if_empty(Args, [user, peer_address, peer_port, state]), - display_info_list(rpc_call(Node, rabbit_networking, info_all, + display_info_list(rpc_call(Node, rabbit_networking, connection_info_all, [ArgAtoms]), ArgAtoms); @@ -275,8 +275,13 @@ action(list_consumers, Node, _Args, Opts, Inform) -> Inform("Listing consumers", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), InfoKeys = [queue_name, channel_pid, consumer_tag, ack_required], - display_info_list(rpc_call(Node, rabbit_consumer, info_all, [VHostArg]), - InfoKeys); + case rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]) of + L when is_list(L) -> display_info_list( + [lists:zip(InfoKeys, tuple_to_list(X)) || + X <- L], + InfoKeys); + Other -> Other + end; action(trace_on, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), @@ -318,9 +323,8 @@ action(report, Node, _Args, _Opts, Inform) -> _ -> ok end end, - GlobalQueries = [rabbit_networking, rabbit_channel], - VHostQueries = [rabbit_amqqueue, rabbit_exchange, rabbit_binding, - rabbit_consumer], + GlobalQueries = [rabbit_channel], + VHostQueries = [rabbit_amqqueue, rabbit_exchange, rabbit_binding], [Report(M, []) || M <- GlobalQueries], [Report(M, [V]) || V <- rpc_call(Node, rabbit_vhost, list, []), M <- VHostQueries], diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl index 72442aaa..451e56e8 100644 --- a/src/rabbit_networking.erl +++ b/src/rabbit_networking.erl @@ -18,8 +18,10 @@ -export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2, stop_tcp_listener/1, on_node_down/1, active_listeners/0, - node_listeners/1, connections/0, info_keys/0, info/1, info/2, - info_all/0, info_all/1, close_connection/2]). + node_listeners/1, connections/0, connection_info_keys/0, + connection_info/1, connection_info/2, + connection_info_all/0, connection_info_all/1, + close_connection/2]). %%used by TCP-based transports, e.g. STOMP adapter -export([check_tcp_listener_address/2, @@ -57,14 +59,14 @@ -spec(active_listeners/0 :: () -> [rabbit_types:listener()]). -spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]). -spec(connections/0 :: () -> [rabbit_types:connection()]). --spec(info_keys/0 :: () -> rabbit_types:info_keys()). 
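%% Aside (editor's sketch, not part of the patch above): the reverted
%% list_consumers code rebuilds proplists on the client side by zipping a
%% fixed key list with each consumer tuple. That conversion in isolation;
%% row_to_proplist/2 is a hypothetical name and the key and tuple lengths
%% must match.
row_to_proplist(InfoKeys, Row) when is_tuple(Row) ->
    lists:zip(InfoKeys, tuple_to_list(Row)).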
--spec(info/1 :: +-spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(connection_info/1 :: (rabbit_types:connection()) -> rabbit_types:infos()). --spec(info/2 :: +-spec(connection_info/2 :: (rabbit_types:connection(), rabbit_types:info_keys()) -> rabbit_types:infos()). --spec(info_all/0 :: () -> [rabbit_types:infos()]). --spec(info_all/1 :: +-spec(connection_info_all/0 :: () -> [rabbit_types:infos()]). +-spec(connection_info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). -spec(close_connection/2 :: (pid(), string()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). @@ -273,13 +275,13 @@ connections() -> {_, ConnSup, supervisor, _} <- supervisor:which_children({rabbit_tcp_client_sup, Node})]. -info_keys() -> rabbit_reader:info_keys(). +connection_info_keys() -> rabbit_reader:info_keys(). -info(Pid) -> rabbit_reader:info(Pid). -info(Pid, Items) -> rabbit_reader:info(Pid, Items). +connection_info(Pid) -> rabbit_reader:info(Pid). +connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items). -info_all() -> cmap(fun (Q) -> info(Q) end). -info_all(Items) -> cmap(fun (Q) -> info(Q, Items) end). +connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end). +connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end). close_connection(Pid, Explanation) -> case lists:member(Pid, connections()) of diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index dce94c56..1a37cdff 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1162,7 +1162,7 @@ test_server_status() -> {ok, _C} = gen_tcp:connect(H, P, []), timer:sleep(100), ok = info_action(list_connections, - rabbit_networking:info_keys(), false), + rabbit_networking:connection_info_keys(), false), %% close_connection [ConnPid] = rabbit_networking:connections(), ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid), -- cgit v1.2.1 From 2ea7604c16e0da630bdae45aef8e102af80c0ce4 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 27 May 2011 09:33:14 +0100 Subject: Remove rabbit_consumer.erl --- src/rabbit_consumer.erl | 42 ------------------------------------------ 1 file changed, 42 deletions(-) delete mode 100644 src/rabbit_consumer.erl diff --git a/src/rabbit_consumer.erl b/src/rabbit_consumer.erl deleted file mode 100644 index 1c9d1064..00000000 --- a/src/rabbit_consumer.erl +++ /dev/null @@ -1,42 +0,0 @@ -%% The contents of this file are subject to the Mozilla Public License -%% Version 1.1 (the "License"); you may not use this file except in -%% compliance with the License. You may obtain a copy of the License -%% at http://www.mozilla.org/MPL/ -%% -%% Software distributed under the License is distributed on an "AS IS" -%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See -%% the License for the specific language governing rights and -%% limitations under the License. -%% -%% The Original Code is RabbitMQ. -%% -%% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. -%% - --module(rabbit_consumer). - --export([info_all/1]). - --include("rabbit.hrl"). - -%%---------------------------------------------------------------------------- - --ifdef(use_specs). - --spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). - --endif. 
- -%%---------------------------------------------------------------------------- - -info_all(VHostPath) -> - [[{queue_name, QName#resource.name}, - {channel_pid, ChPid}, - {consumer_tag, ConsumerTag}, - {ack_required, AckRequired}] || - #amqqueue{pid=QPid, name=QName} <- rabbit_amqqueue:list(VHostPath), - {ChPid, ConsumerTag, AckRequired} <- - delegate:invoke(QPid, fun (P) -> - gen_server2:call(P, consumers, infinity) - end)]. -- cgit v1.2.1 From 0789c270868aef8676bc5e1c389f6051c48b8b00 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 27 May 2011 12:17:06 +0100 Subject: In light of bug21782, solve the asymmetry problem the other way. --- Makefile | 2 +- generate_app | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 3a40f606..c5d754ca 100644 --- a/Makefile +++ b/Makefile @@ -94,7 +94,7 @@ $(DEPS_FILE): $(SOURCES) $(INCLUDES) echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR) $(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app - escript generate_app $@ $(SOURCE_DIR) < $< + escript generate_app $< $@ $(SOURCE_DIR) $(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE) erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $< diff --git a/generate_app b/generate_app index d8813542..fb0eb1ea 100644 --- a/generate_app +++ b/generate_app @@ -1,16 +1,16 @@ #!/usr/bin/env escript %% -*- erlang -*- -main([TargetFile | SrcDirs]) -> +main([InFile, OutFile | SrcDirs]) -> Modules = [list_to_atom(filename:basename(F, ".erl")) || SrcDir <- SrcDirs, F <- filelib:wildcard("*.erl", SrcDir)], - {ok, {application, Application, Properties}} = io:read(''), + {ok, [{application, Application, Properties}]} = file:consult(InFile), NewProperties = case proplists:get_value(modules, Properties) of [] -> lists:keyreplace(modules, 1, Properties, {modules, Modules}); _ -> Properties end, file:write_file( - TargetFile, + OutFile, io_lib:format("~p.~n", [{application, Application, NewProperties}])). -- cgit v1.2.1 From 78665867a6d8e1b574bfc9d680f27022c18bdcc1 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 27 May 2011 12:46:39 +0100 Subject: rabbitmqctl report listing consumers more consistently --- src/rabbit_amqqueue.erl | 12 ++++++++++-- src/rabbit_control.erl | 49 ++++++++++++++++++++++++++++--------------------- 2 files changed, 38 insertions(+), 23 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index d029ff1d..023eef49 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -22,7 +22,7 @@ check_exclusive_access/2, with_exclusive_access_or_die/3, stat/1, deliver/2, requeue/3, ack/4, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). --export([consumers/1, consumers_all/1]). +-export([consumers/1, consumers_all/1, consumer_info_keys/0]). -export([basic_get/3, basic_consume/7, basic_cancel/4]). -export([notify_sent/2, unblock/2, flush_all/2]). -export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). @@ -92,6 +92,7 @@ -spec(consumers/1 :: (rabbit_types:amqqueue()) -> [{pid(), rabbit_types:ctag(), boolean()}]). +-spec(consumer_info_keys/0 :: () -> rabbit_types:info_keys()). -spec(consumers_all/1 :: (rabbit_types:vhost()) -> [{name(), pid(), rabbit_types:ctag(), boolean()}]). @@ -159,6 +160,9 @@ %%---------------------------------------------------------------------------- +-define(CONSUMER_INFO_KEYS, + [queue_name, channel_pid, consumer_tag, ack_required]). 
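%% Aside (editor's sketch, not part of the patch above): the generate_app
%% change switches the escript from io:read/1 on stdin to file:consult/1 on a
%% named input file. Reading an .app(.in) resource file that way, in
%% isolation; read_app_file/1 is a hypothetical name.
read_app_file(InFile) ->
    {ok, [{application, AppName, Properties}]} = file:consult(InFile),
    {AppName, Properties}.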
+ start() -> DurableQueues = find_durable_queues(), {ok, BQ} = application:get_env(rabbit, backing_queue_module), @@ -341,10 +345,14 @@ info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end). consumers(#amqqueue{ pid = QPid }) -> delegate_call(QPid, consumers). +consumer_info_keys() -> ?CONSUMER_INFO_KEYS. + consumers_all(VHostPath) -> lists:append( map(VHostPath, - fun (Q) -> [{Q#amqqueue.name, ChPid, ConsumerTag, AckRequired} || + fun (Q) -> + [lists:zip(consumer_info_keys(), + [Q#amqqueue.name, ChPid, ConsumerTag, AckRequired]) || {ChPid, ConsumerTag, AckRequired} <- consumers(Q)] end)). diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index ef5fd420..64343849 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -274,14 +274,8 @@ action(list_channels, Node, Args, _Opts, Inform) -> action(list_consumers, Node, _Args, Opts, Inform) -> Inform("Listing consumers", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - InfoKeys = [queue_name, channel_pid, consumer_tag, ack_required], - case rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]) of - L when is_list(L) -> display_info_list( - [lists:zip(InfoKeys, tuple_to_list(X)) || - X <- L], - InfoKeys); - Other -> Other - end; + display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]), + rabbit_amqqueue:consumer_info_keys()); action(trace_on, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), @@ -313,21 +307,34 @@ action(list_permissions, Node, [], Opts, Inform) -> action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), - [action(status, ClusteredNode, [], [], Inform) || - ClusteredNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], - Report = fun (Module, VHostArg) -> - io:format("%% ~p~n", [[Module] ++ VHostArg]), - case Results = rpc_call(Node, Module, info_all, VHostArg) of - [Row|_] -> {InfoItems,_} = lists:unzip(Row), - display_info_list(Results, InfoItems); - _ -> ok + [action(status, ClusterNode, [], [], Inform) || + ClusterNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], + Report = fun ({Descr, Module, InfoFun, KeysFun}, VHostArg) -> + io:format("%% ~p~n", [[Descr] ++ VHostArg]), + case Results = rpc_call(Node, Module, InfoFun, VHostArg) of + [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), + display_info_list(Results, InfoItems); + _ -> ok end end, - GlobalQueries = [rabbit_channel], - VHostQueries = [rabbit_amqqueue, rabbit_exchange, rabbit_binding], - [Report(M, []) || M <- GlobalQueries], - [Report(M, [V]) || V <- rpc_call(Node, rabbit_vhost, list, []), - M <- VHostQueries], + GlobalQueries = [{"connections", rabbit_networking, connection_info_all, + connection_info_keys}, + {"channels", rabbit_channel, connection_info_all, + info_keys}], + VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys}, + {"exchanges", rabbit_exchange, info_all, info_keys}, + {"bindings", rabbit_binding, info_all, info_keys}, + {"consumers", rabbit_amqqueue, consumers_all, + consumer_info_keys}], + VHosts = rpc_call(Node, rabbit_vhost, list, []), + [Report(Q, []) || Q <- GlobalQueries], + [Report(Q, [V]) || V <- VHosts, Q <- VHostQueries], + [begin + io:format("%% ~p~n", [["permissions" | [VHost]]]), + display_list(call(Node, + {rabbit_auth_backend_internal, list_vhost_permissions, + [binary_to_list(VHost)]})) + end || VHost <- VHosts], ok. 
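%% Aside (editor's sketch, not part of the patch above): the refactored report
%% action drives each listing from a {Description, Module, InfoFun, KeysFun}
%% tuple: one remote call for the column keys, one for the rows. That lookup
%% step in isolation; fetch_query/3 is a hypothetical name, and the patch
%% routes both calls through its rpc_call/4 wrapper before formatting.
fetch_query(Node, {_Descr, Module, InfoFun, KeysFun}, Args) ->
    Keys = rpc:call(Node, Module, KeysFun, []),
    Rows = rpc:call(Node, Module, InfoFun, Args),
    {Keys, Rows}.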
%%---------------------------------------------------------------------------- -- cgit v1.2.1 From f669d257d056228becc58f1603190cfa8a619bb3 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 27 May 2011 14:04:40 +0100 Subject: Add headers --- src/rabbit_control.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 64343849..53dd117e 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -313,14 +313,14 @@ action(report, Node, _Args, _Opts, Inform) -> io:format("%% ~p~n", [[Descr] ++ VHostArg]), case Results = rpc_call(Node, Module, InfoFun, VHostArg) of [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), + display_row([atom_to_list(I) || I <- InfoItems]), display_info_list(Results, InfoItems); _ -> ok end end, GlobalQueries = [{"connections", rabbit_networking, connection_info_all, connection_info_keys}, - {"channels", rabbit_channel, connection_info_all, - info_keys}], + {"channels", rabbit_channel, info_all, info_keys}], VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys}, {"exchanges", rabbit_exchange, info_all, info_keys}, {"bindings", rabbit_binding, info_all, info_keys}, -- cgit v1.2.1 From 97f0be9123efe4c40a2ae2e5e2bcc04f7635f1f5 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 10:58:20 +0100 Subject: Optimisation while listing consumers --- src/rabbit_amqqueue.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 023eef49..f9ed3edc 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -348,10 +348,11 @@ consumers(#amqqueue{ pid = QPid }) -> consumer_info_keys() -> ?CONSUMER_INFO_KEYS. consumers_all(VHostPath) -> + ConsumerInfoKeys=consumer_info_keys(), lists:append( map(VHostPath, fun (Q) -> - [lists:zip(consumer_info_keys(), + [lists:zip(ConsumerInfoKeys, [Q#amqqueue.name, ChPid, ConsumerTag, AckRequired]) || {ChPid, ConsumerTag, AckRequired} <- consumers(Q)] end)). -- cgit v1.2.1 From 2d38bce657b47c4081c60a2c29f1e0b756f61ac9 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 31 May 2011 11:04:05 +0100 Subject: Remove more pointless namespaces. --- docs/examples-to-end.xsl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/examples-to-end.xsl b/docs/examples-to-end.xsl index d9686ada..a0a74178 100644 --- a/docs/examples-to-end.xsl +++ b/docs/examples-to-end.xsl @@ -1,9 +1,5 @@ -- cgit v1.2.1 From 03e22bf0d868d03974edeafb18d3bda22074e7eb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 31 May 2011 11:08:01 +0100 Subject: These "--novalid"s are less meaningful in the context of bug 24137 since you need to have docbook installed to get this far and so can't trigger the bug. But it can't harm to put them in, and it's possible that there's some other bug I haven't thought of that this fixes. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index a27d3138..1921304a 100644 --- a/Makefile +++ b/Makefile @@ -233,7 +233,7 @@ distclean: clean # xmlto can not read from standard input, so we mess with a tmp file. 
%.gz: %.xml $(DOCS_DIR)/examples-to-end.xsl xmlto --version | grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || opt='--stringparam man.indent.verbatims=0' ; \ - xsltproc $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \ + xsltproc --novalid $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \ xmlto -o $(DOCS_DIR) $$opt man $<.tmp && \ gzip -f $(DOCS_DIR)/`basename $< .xml` rm -f $<.tmp @@ -256,7 +256,7 @@ $(SOURCE_DIR)/%_usage.erl: xmlto xhtml-nochunks `basename $< .xml`.xml ; rm `basename $< .xml`.xml cat `basename $< .xml`.html | \ xsltproc --novalid $(DOCS_DIR)/remove-namespaces.xsl - | \ - xsltproc --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \ + xsltproc --novalid --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \ xmllint --format - > $@ rm `basename $< .xml`.html -- cgit v1.2.1 From b7631fb3d6dce2fef23039949049e566b262ae7a Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 12:03:33 +0100 Subject: rabbitmqctl report layout tidy --- src/rabbit_control.erl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 53dd117e..a2a054b4 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -309,7 +309,7 @@ action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), [action(status, ClusterNode, [], [], Inform) || ClusterNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], - Report = fun ({Descr, Module, InfoFun, KeysFun}, VHostArg) -> + Report = fun ({Descr, Module, InfoFun, KeysFun, VHostArg}) -> io:format("%% ~p~n", [[Descr] ++ VHostArg]), case Results = rpc_call(Node, Module, InfoFun, VHostArg) of [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), @@ -319,22 +319,23 @@ action(report, Node, _Args, _Opts, Inform) -> end end, GlobalQueries = [{"connections", rabbit_networking, connection_info_all, - connection_info_keys}, - {"channels", rabbit_channel, info_all, info_keys}], - VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys}, - {"exchanges", rabbit_exchange, info_all, info_keys}, - {"bindings", rabbit_binding, info_all, info_keys}, + connection_info_keys, []}, + {"channels", rabbit_channel, info_all, info_keys, []}], + VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys, []}, + {"exchanges", rabbit_exchange, info_all, info_keys, []}, + {"bindings", rabbit_binding, info_all, info_keys, []}, {"consumers", rabbit_amqqueue, consumers_all, - consumer_info_keys}], + consumer_info_keys, []}], VHosts = rpc_call(Node, rabbit_vhost, list, []), - [Report(Q, []) || Q <- GlobalQueries], - [Report(Q, [V]) || V <- VHosts, Q <- VHostQueries], + [Report(Q) || Q <- GlobalQueries], + [Report(setelement(5, Q, [V])) || Q <- VHostQueries, V <- VHosts], [begin io:format("%% ~p~n", [["permissions" | [VHost]]]), display_list(call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [binary_to_list(VHost)]})) end || VHost <- VHosts], + io:format("End of server status report~n"), ok. 
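%% Aside (editor's sketch, not part of the patch above): print_report/2,3
%% prints a "%% <description>" banner, then a row of column names, then the
%% result rows. The header-then-rows part of that output with plain io:format,
%% stripped of the display_* helpers; print_table/2 is a hypothetical name and
%% Rows are proplists keyed by Keys.
print_table(Keys, Rows) ->
    io:format("~p~n", [Keys]),
    lists:foreach(
      fun (Row) ->
              io:format("~p~n", [[proplists:get_value(K, Row) || K <- Keys]])
      end, Rows).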
%%---------------------------------------------------------------------------- -- cgit v1.2.1 From 3fcdbab4ba7c2f043fccc36a694f840d0f342571 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 13:36:23 +0100 Subject: List permissions differently --- src/rabbit_auth_backend_internal.erl | 10 ++++++-- src/rabbit_control.erl | 50 ++++++++++++++++++------------------ 2 files changed, 33 insertions(+), 27 deletions(-) diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index f70813d1..7cbd5dca 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -28,7 +28,7 @@ hash_password/1]). -export([set_permissions/5, clear_permissions/2, list_permissions/0, list_vhost_permissions/1, list_user_permissions/1, - list_user_vhost_permissions/2]). + list_user_vhost_permissions/2, vhost_perms_info_keys/0]). -include("rabbit_auth_backend_spec.hrl"). @@ -70,11 +70,14 @@ -spec(list_user_vhost_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) -> [{regexp(), regexp(), regexp()}]). +-spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()). -endif. %%---------------------------------------------------------------------------- +-define(PERMS_INFO_KEYS, [configure_perms, write_perms, read_perms]). + %% Implementation of rabbit_auth_backend description() -> @@ -283,13 +286,16 @@ clear_permissions(Username, VHostPath) -> virtual_host = VHostPath}}) end)). +vhost_perms_info_keys() -> [username] ++ ?PERMS_INFO_KEYS. + list_permissions() -> [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || {Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- list_permissions(match_user_vhost('_', '_'))]. list_vhost_permissions(VHostPath) -> - [{Username, ConfigurePerm, WritePerm, ReadPerm} || + InfoKeys = vhost_perms_info_keys(), + [lists:zip(InfoKeys, [Username, ConfigurePerm, WritePerm, ReadPerm]) || {Username, _, ConfigurePerm, WritePerm, ReadPerm} <- list_permissions(rabbit_vhost:with( VHostPath, match_user_vhost('_', VHostPath)))]. diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index a2a054b4..7d3f7907 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -96,6 +96,18 @@ start() -> fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args). +print_report(Node, {Descr, Module, InfoFun, KeysFun}) -> + print_report(Node, {Descr, Module, InfoFun, KeysFun}, []). + +print_report(Node, {Descr, Module, InfoFun, KeysFun}, VHostArg) -> + io:format("%% ~p~n", [[Descr] ++ VHostArg]), + case Results = rpc_call(Node, Module, InfoFun, VHostArg) of + [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), + display_row([atom_to_list(I) || I <- InfoItems]), + display_info_list(Results, InfoItems); + _ -> ok + end. + print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args). 
print_badrpc_diagnostics(Node) -> @@ -302,39 +314,27 @@ action(clear_permissions, Node, [Username], Opts, Inform) -> action(list_permissions, Node, [], Opts, Inform) -> VHost = proplists:get_value(?VHOST_OPT, Opts), Inform("Listing permissions in vhost ~p", [VHost]), - display_list(call(Node, {rabbit_auth_backend_internal, - list_vhost_permissions, [VHost]})); + display_info_list(call(Node, {rabbit_auth_backend_internal, + list_vhost_permissions, [VHost]}), + rabbit_auth_backend_internal:vhost_perms_info_keys()); action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), [action(status, ClusterNode, [], [], Inform) || ClusterNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], - Report = fun ({Descr, Module, InfoFun, KeysFun, VHostArg}) -> - io:format("%% ~p~n", [[Descr] ++ VHostArg]), - case Results = rpc_call(Node, Module, InfoFun, VHostArg) of - [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), - display_row([atom_to_list(I) || I <- InfoItems]), - display_info_list(Results, InfoItems); - _ -> ok - end - end, GlobalQueries = [{"connections", rabbit_networking, connection_info_all, - connection_info_keys, []}, - {"channels", rabbit_channel, info_all, info_keys, []}], - VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys, []}, - {"exchanges", rabbit_exchange, info_all, info_keys, []}, - {"bindings", rabbit_binding, info_all, info_keys, []}, + connection_info_keys}, + {"channels", rabbit_channel, info_all, info_keys}], + VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys}, + {"exchanges", rabbit_exchange, info_all, info_keys}, + {"bindings", rabbit_binding, info_all, info_keys}, {"consumers", rabbit_amqqueue, consumers_all, - consumer_info_keys, []}], + consumer_info_keys}, + {"permissions", rabbit_auth_backend_internal, + list_vhost_permissions, vhost_perms_info_keys}], VHosts = rpc_call(Node, rabbit_vhost, list, []), - [Report(Q) || Q <- GlobalQueries], - [Report(setelement(5, Q, [V])) || Q <- VHostQueries, V <- VHosts], - [begin - io:format("%% ~p~n", [["permissions" | [VHost]]]), - display_list(call(Node, - {rabbit_auth_backend_internal, list_vhost_permissions, - [binary_to_list(VHost)]})) - end || VHost <- VHosts], + [print_report(Node, Q) || Q <- GlobalQueries], + [print_report(Node, Q, [V]) || Q <- VHostQueries, V <- VHosts], io:format("End of server status report~n"), ok. -- cgit v1.2.1 From 2b70875cc5a34c273def6f1c62c8696c15c4acee Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 14:40:39 +0100 Subject: Report node-specific details separately --- src/rabbit.erl | 5 ++--- src/rabbit_control.erl | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 7b507ff1..c239262d 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -178,7 +178,8 @@ -spec(stop_and_halt/0 :: () -> 'ok'). -spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())). -spec(status/0 :: - () -> [{running_applications, [{atom(), string(), string()}]} | + () -> [{pid, integer()} | + {running_applications, [{atom(), string(), string()}]} | {nodes, [{rabbit_mnesia:node_type(), [node()]}]} | {running_nodes, [node()]}]). -spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()). 
@@ -217,8 +218,6 @@ stop_and_halt() -> status() -> [{pid, list_to_integer(os:getpid())}, - {os, os:type()}, - {erlang_version, erlang:system_info(system_version)}, {running_applications, application:which_applications()}] ++ rabbit_mnesia:status(). diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 7d3f7907..f012c246 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -17,7 +17,7 @@ -module(rabbit_control). -include("rabbit.hrl"). --export([start/0, stop/0, action/5, diagnostics/1]). +-export([start/0, stop/0, action/5, diagnostics/1, node_status/0]). -define(RPC_TIMEOUT, infinity). -define(WAIT_FOR_VM_ATTEMPTS, 5). @@ -38,6 +38,7 @@ -> 'ok'). -spec(diagnostics/1 :: (node()) -> [{string(), [any()]}]). -spec(usage/0 :: () -> no_return()). +-spec(node_status/0 :: () -> [{atom(), any()}]). -endif. @@ -139,6 +140,15 @@ usage() -> io:format("~s", [rabbit_ctl_usage:usage()]), quit(1). +node_status() -> + [{node_name, erlang:node()}, + {os, os:type()}, + {erlang_version, erlang:system_info(system_version)}, + {memory, erlang:memory()}, + {env, lists:filter(fun ({default_pass, _}) -> false; + (_) -> true + end, application:get_all_env(rabbit))}]. + %%---------------------------------------------------------------------------- action(stop, Node, [], _Opts, Inform) -> @@ -320,8 +330,9 @@ action(list_permissions, Node, [], Opts, Inform) -> action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), - [action(status, ClusterNode, [], [], Inform) || - ClusterNode <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], + action(status, Node, [], [], Inform), + [io:format("~p~n", [rpc_call(N, rabbit_control, node_status, [])]) || + N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], GlobalQueries = [{"connections", rabbit_networking, connection_info_all, connection_info_keys}, {"channels", rabbit_channel, info_all, info_keys}], -- cgit v1.2.1 From afe7b305578972e05de2f487b783aa6df33baebb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 31 May 2011 15:51:34 +0100 Subject: Allow binding:list_* to be invoked inside a tx meaningfully. --- src/rabbit_binding.erl | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 2f71bfab..5873537c 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -198,22 +198,33 @@ list(VHostPath) -> Route)]. list_for_source(SrcName) -> - Route = #route{binding = #binding{source = SrcName, _ = '_'}}, - [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route, - Route)]. + mnesia:async_dirty( + fun() -> + Route = #route{binding = #binding{source = SrcName, _ = '_'}}, + [B || #route{binding = B} + <- mnesia:match_object(rabbit_route, Route, read)] + end). list_for_destination(DstName) -> - Route = #route{binding = #binding{destination = DstName, _ = '_'}}, - [reverse_binding(B) || #reverse_route{reverse_binding = B} <- - mnesia:dirty_match_object(rabbit_reverse_route, - reverse_route(Route))]. + mnesia:async_dirty( + fun() -> + Route = #route{binding = #binding{destination = DstName, + _ = '_'}}, + [reverse_binding(B) || + #reverse_route{reverse_binding = B} <- + mnesia:match_object(rabbit_reverse_route, + reverse_route(Route), read)] + end). 
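%% Aside (editor's sketch, not part of the patch above): the rabbit_binding
%% changes swap dirty_match_object for mnesia:match_object/3 run inside
%% mnesia:async_dirty/1, which the commit message says lets the list_*
%% functions be invoked inside a transaction meaningfully. The wrapping
%% pattern on its own; match_rows/2 is a hypothetical name and Pattern is a
%% record pattern such as the #route{} ones above.
match_rows(Tab, Pattern) ->
    mnesia:async_dirty(
      fun () -> mnesia:match_object(Tab, Pattern, read) end).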
list_for_source_and_destination(SrcName, DstName) -> - Route = #route{binding = #binding{source = SrcName, - destination = DstName, - _ = '_'}}, - [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route, - Route)]. + mnesia:async_dirty( + fun() -> + Route = #route{binding = #binding{source = SrcName, + destination = DstName, + _ = '_'}}, + [B || #route{binding = B} <- mnesia:match_object(rabbit_route, + Route, read)] + end). info_keys() -> ?INFO_KEYS. -- cgit v1.2.1 From 39e3f17926ccc20917a7aa44ba958a7541a8f14e Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 16:07:13 +0100 Subject: Move status info (again) --- src/rabbit.erl | 12 +++++++++++- src/rabbit_control.erl | 15 ++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index c239262d..88b16474 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -180,6 +180,10 @@ -spec(status/0 :: () -> [{pid, integer()} | {running_applications, [{atom(), string(), string()}]} | + {os , {atom(), atom()}} | + {erlang_version , string()} | + {memory , any()} | + {env , [{atom() | term()}]} | {nodes, [{rabbit_mnesia:node_type(), [node()]}]} | {running_nodes, [node()]}]). -spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()). @@ -218,7 +222,13 @@ stop_and_halt() -> status() -> [{pid, list_to_integer(os:getpid())}, - {running_applications, application:which_applications()}] ++ + {running_applications, application:which_applications()}, + {os, os:type()}, + {erlang_version, erlang:system_info(system_version)}, + {memory, erlang:memory()}, + {env, lists:filter(fun ({default_pass, _}) -> false; + (_) -> true + end, application:get_all_env(rabbit))}] ++ rabbit_mnesia:status(). rotate_logs(BinarySuffix) -> diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index f012c246..ecd1b81f 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -17,7 +17,7 @@ -module(rabbit_control). -include("rabbit.hrl"). --export([start/0, stop/0, action/5, diagnostics/1, node_status/0]). +-export([start/0, stop/0, action/5, diagnostics/1]). -define(RPC_TIMEOUT, infinity). -define(WAIT_FOR_VM_ATTEMPTS, 5). @@ -38,7 +38,6 @@ -> 'ok'). -spec(diagnostics/1 :: (node()) -> [{string(), [any()]}]). -spec(usage/0 :: () -> no_return()). --spec(node_status/0 :: () -> [{atom(), any()}]). -endif. @@ -140,15 +139,6 @@ usage() -> io:format("~s", [rabbit_ctl_usage:usage()]), quit(1). -node_status() -> - [{node_name, erlang:node()}, - {os, os:type()}, - {erlang_version, erlang:system_info(system_version)}, - {memory, erlang:memory()}, - {env, lists:filter(fun ({default_pass, _}) -> false; - (_) -> true - end, application:get_all_env(rabbit))}]. 
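%% Aside (editor's sketch, not part of the patch above): both versions of the
%% env reporting in these commits filter the default_pass entry out of
%% application:get_all_env(rabbit) before printing it. That scrubbing step in
%% isolation; scrubbed_env/1 is a hypothetical name.
scrubbed_env(App) ->
    [Entry || {Key, _} = Entry <- application:get_all_env(App),
              Key =/= default_pass].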
- %%---------------------------------------------------------------------------- action(stop, Node, [], _Opts, Inform) -> @@ -330,8 +320,7 @@ action(list_permissions, Node, [], Opts, Inform) -> action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), - action(status, Node, [], [], Inform), - [io:format("~p~n", [rpc_call(N, rabbit_control, node_status, [])]) || + [action(status, N, [], [], Inform) || N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], GlobalQueries = [{"connections", rabbit_networking, connection_info_all, connection_info_keys}, -- cgit v1.2.1 From 304e1afd5907ddc6b0bd80f9f051a4d41fab6bb2 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 31 May 2011 16:21:49 +0100 Subject: Update manpage for rabbitmqctl status --- docs/rabbitmqctl.1.xml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index d034e02d..6e57f493 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -183,10 +183,14 @@ status - Displays various information about the RabbitMQ broker, - such as whether the RabbitMQ application on the current - node, its version number, what nodes are part of the - broker, which of these are running. + Displays broker status information such as the running applications + on the current Erlang node, RabbitMQ and Erlang versions, OS, memory + and environment details and which nodes are clustered. + + + Diagnostic information is displayed if the broker is not running, + cannot be reached, or does not permit queries due to security + constraints. For example: rabbitmqctl status -- cgit v1.2.1 From b77d1dc68d86448517505583948cd166130dac65 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 1 Jun 2011 11:03:02 +0100 Subject: Make sure the global name server is definitely up and synced by the time we treat the cluster as "up". --- src/rabbit_mnesia.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2df76d4e..037f9687 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -92,6 +92,9 @@ init() -> ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true, fun maybe_upgrade_local_or_record_desired/0), + %% We intuitively expect the global name server to be up when + %% Mnesia is. In fact that's not the case - let's make it so. + ok = global:sync(), ok. is_db_empty() -> -- cgit v1.2.1 From 560b5e28aae4179b48a4c61f6134dd89008686af Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 1 Jun 2011 11:06:02 +0100 Subject: Tweak comment --- src/rabbit_mnesia.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 037f9687..568b9ce6 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -92,8 +92,9 @@ init() -> ensure_mnesia_dir(), ok = init_db(read_cluster_nodes_config(), true, fun maybe_upgrade_local_or_record_desired/0), - %% We intuitively expect the global name server to be up when - %% Mnesia is. In fact that's not the case - let's make it so. + %% We intuitively expect the global name server to be synced when + %% Mnesia is up. In fact that's not guaranteed to be the case - let's + %% make it so. ok = global:sync(), ok. -- cgit v1.2.1 From 06b44204c0fc6b0120db0925a8efba4fbb41ca98 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 1 Jun 2011 16:39:00 +0100 Subject: rabbit_exchange:peek_serial/1. 
--- src/rabbit_exchange.erl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 84a44cd2..14b078c0 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -24,7 +24,7 @@ info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). %% these must be run inside a mnesia tx --export([maybe_auto_delete/1, serial/1]). +-export([maybe_auto_delete/1, serial/1, peek_serial/1]). %%---------------------------------------------------------------------------- @@ -75,7 +75,8 @@ -spec(maybe_auto_delete/1:: (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). --spec(serial/1:: (rabbit_types:exchange()) -> 'none' | pos_integer()). +-spec(serial/1 :: (rabbit_types:exchange()) -> 'none' | pos_integer()). +-spec(peek_serial/1 :: (name()) -> pos_integer()). -endif. @@ -330,6 +331,12 @@ next_serial(XName) -> #exchange_serial{name = XName, next = Serial + 1}, write), Serial. +peek_serial(XName) -> + case mnesia:read({rabbit_exchange_serial, XName}) of + [#exchange_serial{next = Serial}] -> Serial; + _ -> exchange_already_deleted + end. + %% Used with atoms from records; e.g., the type is expected to exist. type_to_module(T) -> {ok, Module} = rabbit_registry:lookup_module(exchange, T), -- cgit v1.2.1 From 4782c158507022c3a42ae7395da6eb8421dfeca0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 1 Jun 2011 16:43:25 +0100 Subject: Oops --- src/rabbit_exchange.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 14b078c0..916f3c52 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -76,7 +76,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(serial/1 :: (rabbit_types:exchange()) -> 'none' | pos_integer()). --spec(peek_serial/1 :: (name()) -> pos_integer()). +-spec(peek_serial/1 :: (name()) -> pos_integer() | 'exchange_already_deleted'). -endif. -- cgit v1.2.1 From 2eb73bf9a4a804eace34a2e5ce70444fedd590ac Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 1 Jun 2011 16:46:34 +0100 Subject: And change the name --- src/rabbit_exchange.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 916f3c52..b9b0203e 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -76,7 +76,7 @@ (rabbit_types:exchange()) -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}). -spec(serial/1 :: (rabbit_types:exchange()) -> 'none' | pos_integer()). --spec(peek_serial/1 :: (name()) -> pos_integer() | 'exchange_already_deleted'). +-spec(peek_serial/1 :: (name()) -> pos_integer() | 'undefined'). -endif. @@ -334,7 +334,7 @@ next_serial(XName) -> peek_serial(XName) -> case mnesia:read({rabbit_exchange_serial, XName}) of [#exchange_serial{next = Serial}] -> Serial; - _ -> exchange_already_deleted + _ -> undefined end. %% Used with atoms from records; e.g., the type is expected to exist. 
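%% Aside (editor's sketch, not part of the patches above): peek_serial/1 reads
%% the per-exchange serial row and, after the follow-up commits, returns
%% 'undefined' once the row is gone. The read-or-default shape in general
%% form; peek/3 is a hypothetical helper, assumes a set table, and like
%% peek_serial/1 it must run inside an mnesia activity such as a transaction.
peek(Tab, Key, Default) ->
    case mnesia:read({Tab, Key}) of
        [Row] -> Row;
        []    -> Default
    end.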
-- cgit v1.2.1 From 5a7e355c3fc2f589c15a72ef8c0f95f0c4a92e08 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 2 Jun 2011 10:14:50 +0100 Subject: cosmetic --- src/rabbit_upgrade_functions.erl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index bead388d..5e4a1224 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -34,15 +34,15 @@ -ifdef(use_specs). --spec(remove_user_scope/0 :: () -> 'ok'). --spec(hash_passwords/0 :: () -> 'ok'). --spec(add_ip_to_listener/0 :: () -> 'ok'). --spec(internal_exchanges/0 :: () -> 'ok'). +-spec(remove_user_scope/0 :: () -> 'ok'). +-spec(hash_passwords/0 :: () -> 'ok'). +-spec(add_ip_to_listener/0 :: () -> 'ok'). +-spec(internal_exchanges/0 :: () -> 'ok'). -spec(user_to_internal_user/0 :: () -> 'ok'). --spec(topic_trie/0 :: () -> 'ok'). +-spec(topic_trie/0 :: () -> 'ok'). +-spec(semi_durable_route/0 :: () -> 'ok'). -spec(exchange_event_serial/0 :: () -> 'ok'). --spec(semi_durable_route/0 :: () -> 'ok'). --spec(trace_exchanges/0 :: () -> 'ok'). +-spec(trace_exchanges/0 :: () -> 'ok'). -endif. -- cgit v1.2.1 From aacb1cd4fc5daacdea5c69a8dcc3fa14de522956 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 2 Jun 2011 10:44:30 +0100 Subject: cosmetic --- src/rabbit_amqqueue.erl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index d029ff1d..32798cb2 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -193,12 +193,12 @@ recover_durable_queues(DurableQueues) -> declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), - Q = start_queue_process(#amqqueue{name = QueueName, - durable = Durable, - auto_delete = AutoDelete, - arguments = Args, + Q = start_queue_process(#amqqueue{name = QueueName, + durable = Durable, + auto_delete = AutoDelete, + arguments = Args, exclusive_owner = Owner, - pid = none}), + pid = none}), case gen_server2:call(Q#amqqueue.pid, {init, false}, infinity) of not_found -> rabbit_misc:not_found(QueueName); Q1 -> Q1 @@ -478,11 +478,11 @@ delete_queue(QueueName) -> rabbit_binding:remove_transient_for_destination(QueueName). pseudo_queue(QueueName, Pid) -> - #amqqueue{name = QueueName, - durable = false, + #amqqueue{name = QueueName, + durable = false, auto_delete = false, - arguments = [], - pid = Pid}. + arguments = [], + pid = Pid}. safe_delegate_call_ok(F, Pids) -> case delegate:invoke(Pids, fun (Pid) -> -- cgit v1.2.1 From da71b755e5da5eaebb732205d9ee43f8fef263fc Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 2 Jun 2011 10:46:00 +0100 Subject: cosmetic: eliminate gratuitous diffs vs 'default' --- src/rabbit_amqqueue_process.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index b1c95338..17c35e90 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -35,7 +35,7 @@ -export([init_with_backing_queue_state/7]). 
-% Queue's state +%% Queue's state -record(q, {q, exclusive_consumer, has_had_consumers, @@ -843,11 +843,11 @@ emit_consumer_deleted(ChPid, ConsumerTag) -> prioritise_call(Msg, _From, _State) -> case Msg of - info -> 9; - {info, _Items} -> 9; - consumers -> 9; - {run_backing_queue, _Mod, _Fun} -> 6; - _ -> 0 + info -> 9; + {info, _Items} -> 9; + consumers -> 9; + {run_backing_queue, _Mod, _Fun} -> 6; + _ -> 0 end. prioritise_cast(Msg, _State) -> -- cgit v1.2.1 From 1d72bbec0dbd79af9efc0fb81d48ff216e04a2de Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 2 Jun 2011 11:08:52 +0100 Subject: Undo unneeded change to boots --- src/rabbit.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 02477b65..e6e80b4a 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -39,12 +39,6 @@ {requires, pre_boot}, {enables, external_infrastructure}]}). --rabbit_boot_step({rabbit_registry, - [{description, "plugin registry"}, - {mfa, {rabbit_sup, start_child, - [rabbit_registry]}}, - {enables, external_infrastructure}]}). - -rabbit_boot_step({database, [{mfa, {rabbit_mnesia, init, []}}, {requires, file_handle_cache}, @@ -66,6 +60,13 @@ -rabbit_boot_step({external_infrastructure, [{description, "external infrastructure ready"}]}). +-rabbit_boot_step({rabbit_registry, + [{description, "plugin registry"}, + {mfa, {rabbit_sup, start_child, + [rabbit_registry]}}, + {requires, external_infrastructure}, + {enables, kernel_ready}]}). + -rabbit_boot_step({rabbit_log, [{description, "logging server"}, {mfa, {rabbit_sup, start_restartable_child, -- cgit v1.2.1 From 6bf3438203356cb164fd109720816c434ad96884 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 2 Jun 2011 13:12:32 +0100 Subject: Cosmetic --- src/rabbit_control.erl | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index ecd1b81f..57a3fa55 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -26,6 +26,19 @@ -define(NODE_OPT, "-n"). -define(VHOST_OPT, "-p"). +-define(GLOBAL_QUERIES, + [{"connections", rabbit_networking, connection_info_all, + connection_info_keys}, + {"channels", rabbit_channel, info_all, info_keys}]). + +-define(VHOST_QUERIES, + [{"queues", rabbit_amqqueue, info_all, info_keys}, + {"exchanges", rabbit_exchange, info_all, info_keys}, + {"bindings", rabbit_binding, info_all, info_keys}, + {"consumers", rabbit_amqqueue, consumers_all, consumer_info_keys}, + {"permissions", rabbit_auth_backend_internal, list_vhost_permissions, + vhost_perms_info_keys}]). + %%---------------------------------------------------------------------------- -ifdef(use_specs). 
@@ -322,19 +335,9 @@ action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), [action(status, N, [], [], Inform) || N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], - GlobalQueries = [{"connections", rabbit_networking, connection_info_all, - connection_info_keys}, - {"channels", rabbit_channel, info_all, info_keys}], - VHostQueries = [{"queues", rabbit_amqqueue, info_all, info_keys}, - {"exchanges", rabbit_exchange, info_all, info_keys}, - {"bindings", rabbit_binding, info_all, info_keys}, - {"consumers", rabbit_amqqueue, consumers_all, - consumer_info_keys}, - {"permissions", rabbit_auth_backend_internal, - list_vhost_permissions, vhost_perms_info_keys}], VHosts = rpc_call(Node, rabbit_vhost, list, []), - [print_report(Node, Q) || Q <- GlobalQueries], - [print_report(Node, Q, [V]) || Q <- VHostQueries, V <- VHosts], + [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES], + [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts], io:format("End of server status report~n"), ok. -- cgit v1.2.1 From 47e0876dcea3c307fd99b5c327f3fac436e43516 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 2 Jun 2011 14:41:09 +0100 Subject: Move set_table_value from federation to misc --- src/rabbit_misc.erl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 53171e87..cebb888f 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -25,7 +25,7 @@ protocol_error/3, protocol_error/4, protocol_error/1]). -export([not_found/1, assert_args_equivalence/4]). -export([dirty_read/1]). --export([table_lookup/2]). +-export([table_lookup/2, set_table_value/4]). -export([r/3, r/2, r_arg/4, rs/1]). -export([enable_cover/0, report_cover/0]). -export([enable_cover/1, report_cover/1]). @@ -104,6 +104,11 @@ -spec(table_lookup/2 :: (rabbit_framing:amqp_table(), binary()) -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}). +-spec(set_table_value/4 :: + (rabbit_framing:amqp_table(), binary(), + rabbit_framing:amqp_field_type(), rabbit_framing:amqp_value()) + -> rabbit_framing:amqp_table()). + -spec(r/2 :: (rabbit_types:vhost(), K) -> rabbit_types:r3(rabbit_types:vhost(), K, '_') when is_subtype(K, atom())). @@ -268,6 +273,10 @@ table_lookup(Table, Key) -> false -> undefined end. +set_table_value(Table, Key, Type, Value) -> + sort_field_table( + lists:keystore(Key, 1, Table, {Key, Type, Value})). + r(#resource{virtual_host = VHostPath}, Kind, Name) when is_binary(Name) -> #resource{virtual_host = VHostPath, kind = Kind, name = Name}; -- cgit v1.2.1 From ac0245f94d2c6977014279c77ae9673dfb222bf5 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 2 Jun 2011 15:00:44 +0100 Subject: Move pget from federation / mgmt to misc. --- src/rabbit_misc.erl | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index cebb888f..b6b97f6d 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -56,6 +56,7 @@ -export([const_ok/0, const/1]). -export([ntoa/1, ntoab/1]). -export([is_process_alive/1]). +-export([pget/2, pget/3, pget_or_die/2]). %%---------------------------------------------------------------------------- @@ -201,6 +202,9 @@ -spec(ntoa/1 :: (inet:ip_address()) -> string()). -spec(ntoab/1 :: (inet:ip_address()) -> string()). -spec(is_process_alive/1 :: (pid()) -> boolean()). +-spec(pget/2 :: (term(), [term()]) -> term()). +-spec(pget/3 :: (term(), [term()], term()) -> term()). 
+-spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()). -endif. @@ -906,3 +910,12 @@ is_process_alive(Pid) -> true -> true; _ -> false end. + +pget(K, P) -> proplists:get_value(K, P). +pget(K, P, D) -> proplists:get_value(K, P, D). + +pget_or_die(K, P) -> + case proplists:get_value(K, P) of + undefined -> exit({error, key_missing, K}); + V -> V + end. -- cgit v1.2.1 From 66beecac50e410a83441807f8800ff32721b9c00 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 2 Jun 2011 15:09:00 +0100 Subject: Reduce diff from bug23554: minor tweaks to gm_soak_test (correction of error messages) --- src/gm_soak_test.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl index dae42ac7..5e5a3a5a 100644 --- a/src/gm_soak_test.erl +++ b/src/gm_soak_test.erl @@ -80,12 +80,12 @@ handle_msg([], From, {test_msg, Num}) -> {ok, Num} -> ok; {ok, Num1} when Num < Num1 -> exit({{from, From}, - {duplicate_delivery_of, Num1}, - {expecting, Num}}); + {duplicate_delivery_of, Num}, + {expecting, Num1}}); {ok, Num1} -> exit({{from, From}, - {missing_delivery_of, Num}, - {received_early, Num1}}); + {received_early, Num}, + {expecting, Num1}}); error -> exit({{from, From}, {received_premature_delivery, Num}}) -- cgit v1.2.1 From 8dada8a4ccb33e7a36ac3c1592379ae4ac904df4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 2 Jun 2011 15:25:42 +0100 Subject: Reduce diff from bug23554: Allow formatting of nested info items --- src/rabbit_control.erl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8172f804..8e0a2a53 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -372,6 +372,12 @@ format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] = Value) when is_binary(TableEntryKey) andalso is_atom(TableEntryType) -> io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]); +format_info_item([T | _] = Value) + when is_tuple(T) orelse is_pid(T) orelse is_binary(T) orelse is_atom(T) orelse + is_list(T) -> + "[" ++ + lists:nthtail(2, lists:append( + [", " ++ format_info_item(E) || E <- Value])) ++ "]"; format_info_item(Value) -> io_lib:format("~w", [Value]). -- cgit v1.2.1 From 3b57619d1e4b2ed349390f82e68ba8ae2517c425 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 2 Jun 2011 15:32:33 +0100 Subject: Reduce diff from bug23554: Scaffolding for programmatically deciding backing queue --- src/rabbit_amqqueue_process.erl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 8091e2c2..f7b710a4 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -97,12 +97,11 @@ info_keys() -> ?INFO_KEYS. init(Q) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), - {ok, BQ} = application:get_env(backing_queue_module), {ok, #q{q = Q#amqqueue{pid = self()}, exclusive_consumer = none, has_had_consumers = false, - backing_queue = BQ, + backing_queue = backing_queue_module(Q), backing_queue_state = undefined, active_consumers = queue:new(), blocked_consumers = queue:new(), @@ -226,6 +225,10 @@ next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> timed -> {ensure_sync_timer(State1), 0 } end. +backing_queue_module(#amqqueue{}) -> + {ok, BQM} = application:get_env(backing_queue_module), + BQM. 
+ ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> {ok, TRef} = timer:apply_after( ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), -- cgit v1.2.1 From 4013096a30291bd1f98b6016e018f405c7dbe0f8 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 2 Jun 2011 15:40:26 +0100 Subject: Reduce diff from bug23554: Extend backing queue api to present reason for termination --- include/rabbit_backing_queue_spec.hrl | 4 ++-- src/rabbit_amqqueue_process.erl | 12 ++++++------ src/rabbit_backing_queue.erl | 4 ++-- src/rabbit_tests.erl | 8 ++++---- src/rabbit_variable_queue.erl | 6 +++--- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 1c2b94e2..295d9039 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -32,8 +32,8 @@ -spec(stop/0 :: () -> 'ok'). -spec(init/4 :: (rabbit_types:amqqueue(), attempt_recovery(), async_callback(), sync_callback()) -> state()). --spec(terminate/1 :: (state()) -> state()). --spec(delete_and_terminate/1 :: (state()) -> state()). +-spec(terminate/2 :: (any(), state()) -> state()). +-spec(delete_and_terminate/2 :: (any(), state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). -spec(publish/4 :: (rabbit_types:basic_message(), rabbit_types:message_properties(), pid(), state()) -> diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index f7b710a4..07a24af8 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -114,16 +114,16 @@ init(Q) -> msg_id_to_channel = dict:new()}, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. -terminate(shutdown, State = #q{backing_queue = BQ}) -> - terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); -terminate({shutdown, _}, State = #q{backing_queue = BQ}) -> - terminate_shutdown(fun (BQS) -> BQ:terminate(BQS) end, State); -terminate(_Reason, State = #q{backing_queue = BQ}) -> +terminate(shutdown = R, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State); +terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) -> + terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State); +terminate(Reason, State = #q{backing_queue = BQ}) -> %% FIXME: How do we cancel active subscriptions? terminate_shutdown(fun (BQS) -> rabbit_event:notify( queue_deleted, [{pid, self()}]), - BQS1 = BQ:delete_and_terminate(BQS), + BQS1 = BQ:delete_and_terminate(Reason, BQS), %% don't care if the internal delete %% doesn't return 'ok'. rabbit_amqqueue:internal_delete(qname(State)), diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index addaabc5..217ad3eb 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -49,11 +49,11 @@ behaviour_info(callbacks) -> {init, 4}, %% Called on queue shutdown when queue isn't being deleted. - {terminate, 1}, + {terminate, 2}, %% Called when the queue is terminating and needs to delete all %% its content. - {delete_and_terminate, 1}, + {delete_and_terminate, 2}, %% Remove all messages in the queue, but not messages which have %% been fetched and are pending acks. 
diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 1a37cdff..3f4aa54e 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2116,7 +2116,7 @@ with_fresh_variable_queue(Fun) -> {delta, {delta, undefined, 0, undefined}}, {q3, 0}, {q4, 0}, {len, 0}]), - _ = rabbit_variable_queue:delete_and_terminate(Fun(VQ)), + _ = rabbit_variable_queue:delete_and_terminate(shutdown, Fun(VQ)), passed. test_variable_queue() -> @@ -2284,7 +2284,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) -> Count + Count, VQ3), {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false, Count, VQ4), - _VQ6 = rabbit_variable_queue:terminate(VQ5), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), VQ7 = variable_queue_init(test_amqqueue(true), true), {{_Msg1, true, _AckTag1, Count1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7), @@ -2301,7 +2301,7 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> {_Guids, VQ4} = rabbit_variable_queue:requeue(AckTags, fun(X) -> X end, VQ3), VQ5 = rabbit_variable_queue:timeout(VQ4), - _VQ6 = rabbit_variable_queue:terminate(VQ5), + _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5), VQ7 = variable_queue_init(test_amqqueue(true), true), {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7), VQ8. @@ -2336,7 +2336,7 @@ test_queue_recover() -> VQ1 = variable_queue_init(Q, true), {{_Msg1, true, _AckTag1, CountMinusOne}, VQ2} = rabbit_variable_queue:fetch(true, VQ1), - _VQ3 = rabbit_variable_queue:delete_and_terminate(VQ2), + _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2), rabbit_amqqueue:internal_delete(QName) end), passed. diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index 8ac3ad43..a167cca0 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,7 +16,7 @@ -module(rabbit_variable_queue). --export([init/4, terminate/1, delete_and_terminate/1, +-export([init/4, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, drain_confirmed/1, fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, dropwhile/2, @@ -452,7 +452,7 @@ init(#amqqueue { name = QueueName, durable = true }, true, init(true, IndexState, DeltaCount, Terms1, AsyncCallback, SyncCallback, PersistentClient, TransientClient). -terminate(State) -> +terminate(_Reason, State) -> State1 = #vqstate { persistent_count = PCount, index_state = IndexState, msg_store_clients = {MSCStateP, MSCStateT} } = @@ -473,7 +473,7 @@ terminate(State) -> %% the only difference between purge and delete is that delete also %% needs to delete everything that's been delivered and not ack'd. -delete_and_terminate(State) -> +delete_and_terminate(_Reason, State) -> %% TODO: there is no need to interact with qi at all - which we do %% as part of 'purge' and 'remove_pending_ack', other than %% deleting it. -- cgit v1.2.1 From 9c31f2a89f7e6ae55030892ea39fba85647db019 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 2 Jun 2011 18:03:46 +0100 Subject: Oops, we don't map true/false to transaction/none on recover. Let's fix that here... 
--- src/rabbit_exchange.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index b9b0203e..cab1b99f 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -94,7 +94,7 @@ recover() -> true -> store(X); false -> ok end, - rabbit_exchange:callback(X, create, [Tx, X]) + rabbit_exchange:callback(X, create, [map_create_tx(Tx), X]) end, rabbit_durable_exchange), [XName || #exchange{name = XName} <- Xs]. @@ -128,10 +128,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> end end, fun ({new, Exchange}, Tx) -> - ok = XT:create(case Tx of - true -> transaction; - false -> none - end, Exchange), + ok = XT:create(map_create_tx(Tx), Exchange), rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)), Exchange; ({existing, Exchange}, _Tx) -> @@ -140,6 +137,9 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) -> Err end). +map_create_tx(true) -> transaction; +map_create_tx(false) -> none. + store(X = #exchange{name = Name, type = Type}) -> ok = mnesia:write(rabbit_exchange, X, write), case (type_to_module(Type)):serialise_events() of -- cgit v1.2.1 From 1cced6c5f404863983ee958b1424ff449ceedc69 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Thu, 2 Jun 2011 18:12:17 +0100 Subject: Split rabbitmqctl status query --- docs/rabbitmqctl.1.xml | 29 +++++++++++++++++++++++++++-- src/rabbit.erl | 23 +++++++++++------------ src/rabbit_control.erl | 26 +++++++++++++++++++------- 3 files changed, 57 insertions(+), 21 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 6e57f493..ceb4e98b 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -184,8 +184,9 @@ Displays broker status information such as the running applications - on the current Erlang node, RabbitMQ and Erlang versions, OS, memory - and environment details and which nodes are clustered. + on the current Erlang node, RabbitMQ and Erlang versions and OS name. + (See the cluster_status command to find out which + nodes are clustered and running.) Diagnostic information is displayed if the broker is not running, @@ -381,6 +382,20 @@ + + cluster_status + + + Displays all the nodes in the cluster grouped by node type, + together with the currently running nodes. + + For example: + rabbitmqctl cluster_status + + This command displays the nodes in the cluster. + + + @@ -1283,6 +1298,16 @@ + + environment + + + Display the name and value of each variable in the application environment. + This can be used to verify that options specified for the rabbit + application in the configuration file have been read. + + + report diff --git a/src/rabbit.erl b/src/rabbit.erl index 88b16474..9a3de988 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -18,7 +18,7 @@ -behaviour(application). --export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0, +-export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0, environment/0, rotate_logs/1]). -export([start/2, stop/1]). @@ -180,12 +180,10 @@ -spec(status/0 :: () -> [{pid, integer()} | {running_applications, [{atom(), string(), string()}]} | - {os , {atom(), atom()}} | - {erlang_version , string()} | - {memory , any()} | - {env , [{atom() | term()}]} | - {nodes, [{rabbit_mnesia:node_type(), [node()]}]} | - {running_nodes, [node()]}]). + {os, {atom(), atom()}} | + {erlang_version, string()} | + {memory, any()}]). +-spec(environment/0 :: () -> [{atom() | term()}]). -spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()). 
-spec(maybe_insert_default_data/0 :: () -> 'ok'). @@ -225,11 +223,12 @@ status() -> {running_applications, application:which_applications()}, {os, os:type()}, {erlang_version, erlang:system_info(system_version)}, - {memory, erlang:memory()}, - {env, lists:filter(fun ({default_pass, _}) -> false; - (_) -> true - end, application:get_all_env(rabbit))}] ++ - rabbit_mnesia:status(). + {memory, erlang:memory()}]. + +environment() -> + lists:filter(fun ({default_pass, _}) -> false; + (_) -> true + end, application:get_all_env(rabbit)). rotate_logs(BinarySuffix) -> Suffix = binary_to_list(BinarySuffix), diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 57a3fa55..1fcd3de0 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -192,11 +192,15 @@ action(wait, Node, [], _Opts, Inform) -> action(status, Node, [], _Opts, Inform) -> Inform("Status of node ~p", [Node]), - case call(Node, {rabbit, status, []}) of - {badrpc, _} = Res -> Res; - Res -> io:format("~p~n", [Res]), - ok - end; + display_call_result(Node, {rabbit, status, []}); + +action(cluster_status, Node, [], _Opts, Inform) -> + Inform("Cluster status of node ~p", [Node]), + display_call_result(Node, {rabbit_mnesia, status, []}); + +action(environment, Node, _App, _Opts, Inform) -> + Inform("Application environment of node ~p", [Node]), + display_call_result(Node, {rabbit, environment, []}); action(rotate_logs, Node, [], _Opts, Inform) -> Inform("Reopening logs for node ~p", [Node]), @@ -333,8 +337,9 @@ action(list_permissions, Node, [], Opts, Inform) -> action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n", [erlang:universaltime()]), - [action(status, N, [], [], Inform) || - N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, [])], + [ok = action(Action, N, [], [], Inform) || + N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, []), + Action <- [status, cluster_status, environment]], VHosts = rpc_call(Node, rabbit_vhost, list, []), [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES], [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts], @@ -416,6 +421,13 @@ display_list(L) when is_list(L) -> ok; display_list(Other) -> Other. +display_call_result(Node, MFA) -> + case call(Node, MFA) of + {badrpc, _} = Res -> Res; + Res -> io:format("~p~n", [Res]), + ok + end. + call(Node, {Mod, Fun, Args}) -> rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary/1, Args)). -- cgit v1.2.1 From d953296ea3663ec61cc07a7f270b96d83d37442b Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 3 Jun 2011 11:22:01 +0100 Subject: Show docs in usage --- docs/rabbitmqctl.1.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index ceb4e98b..362e752d 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1301,8 +1301,8 @@ environment - - Display the name and value of each variable in the application environment. + + The "environment" command displays variables in the application environment. This can be used to verify that options specified for the rabbit application in the configuration file have been read. @@ -1311,10 +1311,10 @@ report - - Generate a server status report containing a concatenation of all server status - information for support purposes. The output should be redirected to a - file when accompanying a support request. + + The "report" command produces a server status report which is a concatenation + of all server status information. 
The output should be redirected to a file + when accompanying a support request. For example: -- cgit v1.2.1 From e3dbbc0f6f93c12f0997baee05b3fa9ab30f0280 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 3 Jun 2011 11:31:22 +0100 Subject: Less verbose description of environment command --- docs/rabbitmqctl.1.xml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 362e752d..5024a45c 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1302,9 +1302,8 @@ environment - The "environment" command displays variables in the application environment. - This can be used to verify that options specified for the rabbit - application in the configuration file have been read. + The "environment" command displays variables in the rabbit application + environment. -- cgit v1.2.1 From 48cf957a5074442eae421dc148ee4ced7cec4227 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 3 Jun 2011 11:45:53 +0100 Subject: Need a space here --- docs/rabbitmqctl.1.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 362e752d..8f99f8fc 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1278,7 +1278,7 @@ - list_consumers-p vhostpath + list_consumers -p vhostpath List consumers, i.e. subscriptions to a queue's message -- cgit v1.2.1 From 5f152e2e3c555ba96531985ca35cdb64cda362f4 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 3 Jun 2011 11:51:50 +0100 Subject: Change rabbitmqctl docs Move environment and report from Server Status to Application and Cluster Management Remove list_consumers, environment and report from usage info --- docs/rabbitmqctl.1.xml | 64 +++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 5024a45c..a222c742 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -202,6 +202,34 @@ + + environment + + + Display the name and value of each variable in the application environment. + + + + + + report + + + Generate a server status report containing a concatenation of all server status + information for support purposes. The output should be redirected to a + file when accompanying a support request. + + + For example: + + rabbitmqctl report > server_report.txt + + This command creates a server report which may be attached to a + support request email. + + + + reset @@ -1290,38 +1318,10 @@ indicating whether acknowledgements are expected for messages delivered to this consumer. - - The output format for "list_consumers" is a list of rows containing, - in order, the queue name, channel process id, consumer tag, and a - boolean indicating whether acknowledgements are expected from the - consumer. - - - - - environment - - - The "environment" command displays variables in the rabbit application - environment. - - - - - report - - - The "report" command produces a server status report which is a concatenation - of all server status information. The output should be redirected to a file - when accompanying a support request. - - - For example: - - rabbitmqctl report > server_report.txt - - This command creates a server report which may be attached to a - support request email. + + The output is a list of rows containing, in order, the queue name, + channel process id, consumer tag, and a boolean indicating whether + acknowledgements are expected from the consumer. 
-- cgit v1.2.1 From 90d224ee4539c4b3209a9528afac5fda823eb9a3 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 3 Jun 2011 11:57:40 +0100 Subject: Use a list comp to filter, sort the list. --- src/rabbit.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 9a3de988..8866a1b7 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -226,9 +226,9 @@ status() -> {memory, erlang:memory()}]. environment() -> - lists:filter(fun ({default_pass, _}) -> false; - (_) -> true - end, application:get_all_env(rabbit)). + lists:keysort( + 1, [P || P = {K, _} <- application:get_all_env(rabbit), + K =/= default_pass]). rotate_logs(BinarySuffix) -> Suffix = binary_to_list(BinarySuffix), -- cgit v1.2.1 From a00dc3cc5975549198ddc91efc461fb76727beb8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 3 Jun 2011 12:17:58 +0100 Subject: Reformat, rephrase the "diagnostic information" para and move it to the top since it always applies. --- docs/rabbitmqctl.1.xml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index c2434770..51754895 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -59,6 +59,11 @@ RabbitMQ broker. It performs all actions by connecting to one of the broker's nodes. + + Diagnostic information is displayed if the broker was not + running, could not be reached, or rejected the connection due to + mismatching Erlang cookies. + @@ -183,15 +188,11 @@ status - Displays broker status information such as the running applications - on the current Erlang node, RabbitMQ and Erlang versions and OS name. - (See the cluster_status command to find out which - nodes are clustered and running.) - - - Diagnostic information is displayed if the broker is not running, - cannot be reached, or does not permit queries due to security - constraints. + Displays broker status information such as the running + applications on the current Erlang node, RabbitMQ and + Erlang versions and OS name. (See + the cluster_status command to find + out which nodes are clustered and running.) For example: rabbitmqctl status -- cgit v1.2.1 From ab94566c01498db333cb96261d24b35ebd9ab9b4 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 3 Jun 2011 12:20:56 +0100 Subject: Reformat and move environment / report back (and take status with them). --- docs/rabbitmqctl.1.xml | 96 ++++++++++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 51754895..b8d6fbd3 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -184,53 +184,6 @@ - - status - - - Displays broker status information such as the running - applications on the current Erlang node, RabbitMQ and - Erlang versions and OS name. (See - the cluster_status command to find - out which nodes are clustered and running.) - - For example: - rabbitmqctl status - - This command displays information about the RabbitMQ - broker. - - - - - - environment - - - Display the name and value of each variable in the application environment. - - - - - - report - - - Generate a server status report containing a concatenation of all server status - information for support purposes. The output should be redirected to a - file when accompanying a support request. - - - For example: - - rabbitmqctl report > server_report.txt - - This command creates a server report which may be attached to a - support request email. 
- - - - reset @@ -1326,6 +1279,55 @@ + + + status + + + Displays broker status information such as the running + applications on the current Erlang node, RabbitMQ and + Erlang versions and OS name. (See + the cluster_status command to find + out which nodes are clustered and running.) + + For example: + rabbitmqctl status + + This command displays information about the RabbitMQ + broker. + + + + + + environment + + + Display the name and value of each variable in the + application environment. + + + + + + report + + + Generate a server status report containing a + concatenation of all server status information for + support purposes. The output should be redirected to a + file when accompanying a support request. + + + For example: + + rabbitmqctl report > server_report.txt + + This command creates a server report which may be + attached to a support request email. + + + -- cgit v1.2.1 From 99761af89a8f3a2cf9b175b750f89e6bc2173343 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 3 Jun 2011 12:38:17 +0100 Subject: Report cosmetics --- src/rabbit_control.erl | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 1fcd3de0..5f8d9f97 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -27,16 +27,16 @@ -define(VHOST_OPT, "-p"). -define(GLOBAL_QUERIES, - [{"connections", rabbit_networking, connection_info_all, + [{"Connections", rabbit_networking, connection_info_all, connection_info_keys}, - {"channels", rabbit_channel, info_all, info_keys}]). + {"Channels", rabbit_channel, info_all, info_keys}]). -define(VHOST_QUERIES, - [{"queues", rabbit_amqqueue, info_all, info_keys}, - {"exchanges", rabbit_exchange, info_all, info_keys}, - {"bindings", rabbit_binding, info_all, info_keys}, - {"consumers", rabbit_amqqueue, consumers_all, consumer_info_keys}, - {"permissions", rabbit_auth_backend_internal, list_vhost_permissions, + [{"Queues", rabbit_amqqueue, info_all, info_keys}, + {"Exchanges", rabbit_exchange, info_all, info_keys}, + {"Bindings", rabbit_binding, info_all, info_keys}, + {"Consumers", rabbit_amqqueue, consumers_all, consumer_info_keys}, + {"Permissions", rabbit_auth_backend_internal, list_vhost_permissions, vhost_perms_info_keys}]). %%---------------------------------------------------------------------------- @@ -110,16 +110,21 @@ start() -> fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args). print_report(Node, {Descr, Module, InfoFun, KeysFun}) -> - print_report(Node, {Descr, Module, InfoFun, KeysFun}, []). + io:format("~s:~n", [Descr]), + print_report0(Node, {Module, InfoFun, KeysFun}, []). print_report(Node, {Descr, Module, InfoFun, KeysFun}, VHostArg) -> - io:format("%% ~p~n", [[Descr] ++ VHostArg]), + io:format("~s on ~s:~n", [Descr, VHostArg]), + print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg). + +print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) -> case Results = rpc_call(Node, Module, InfoFun, VHostArg) of [_|_] -> InfoItems = rpc_call(Node, Module, KeysFun, []), display_row([atom_to_list(I) || I <- InfoItems]), display_info_list(Results, InfoItems); _ -> ok - end. + end, + io:nl(). print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args). 
@@ -336,10 +341,10 @@ action(list_permissions, Node, [], Opts, Inform) -> rabbit_auth_backend_internal:vhost_perms_info_keys()); action(report, Node, _Args, _Opts, Inform) -> - io:format("Reporting server status on ~p~n", [erlang:universaltime()]), - [ok = action(Action, N, [], [], Inform) || - N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, []), - Action <- [status, cluster_status, environment]], + io:format("Reporting server status on ~p~n~n", [erlang:universaltime()]), + [begin ok = action(Action, N, [], [], Inform), io:nl() end || + N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, []), + Action <- [status, cluster_status, environment]], VHosts = rpc_call(Node, rabbit_vhost, list, []), [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES], [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts], -- cgit v1.2.1 From bc0233a9aa74bf30c2cf65ae1d90ca4bee28f208 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 3 Jun 2011 14:05:39 +0100 Subject: optimise routing by bypassing mnesia:dirty_select ...and going straight to an ets:select instead --- src/rabbit_router.erl | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index f6a1c92f..8f166672 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -84,21 +84,18 @@ match_bindings(SrcName, Match) -> mnesia:async_dirty(fun qlc:e/1, [Query]). match_routing_key(SrcName, [RoutingKey]) -> - MatchHead = #route{binding = #binding{source = SrcName, + find_routes(#route{binding = #binding{source = SrcName, destination = '$1', key = RoutingKey, _ = '_'}}, - mnesia:dirty_select(rabbit_route, [{MatchHead, [], ['$1']}]); + []); match_routing_key(SrcName, [_|_] = RoutingKeys) -> - Condition = list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || - RKey <- RoutingKeys]]), - MatchHead = #route{binding = #binding{source = SrcName, + find_routes(#route{binding = #binding{source = SrcName, destination = '$1', key = '$2', _ = '_'}}, - mnesia:dirty_select(rabbit_route, [{MatchHead, [Condition], ['$1']}]). - - + [list_to_tuple(['orelse' | [{'=:=', '$2', RKey} || + RKey <- RoutingKeys]])]). %%-------------------------------------------------------------------- @@ -117,3 +114,25 @@ lookup_qpids(QNames) -> [] -> QPids end end, [], QNames). + +%% Normally we'd call mnesia:dirty_select/2 here, but that is quite +%% expensive due to +%% +%% 1) general mnesia overheads (figuring out table types and +%% locations, etc). We get away with bypassing these because we know +%% that the table +%% - is not the schema table +%% - has a local ram copy +%% - does not have any indices +%% +%% 2) 'fixing' of the table with ets:safe_fixtable/2, which is wholly +%% unnecessary. According to the ets docs (and the code in erl_db.c), +%% 'select' is safe anyway ("Functions that internally traverse over a +%% table, like select and match, will give the same guarantee as +%% safe_fixtable.") and, furthermore, even the lower level iterators +%% ('first' and 'next') are safe on ordered_set tables ("Note that for +%% tables of the ordered_set type, safe_fixtable/2 is not necessary as +%% calls to first/1 and next/2 will always succeed."), which +%% rabbit_route is. +find_routes(MatchHead, Conditions) -> + ets:select(rabbit_route, [{MatchHead, Conditions, ['$1']}]). 
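For illustration, a minimal sketch (not part of the patch series) of the single select that match_routing_key/2 and find_routes/2 combine to issue; the vhost, exchange name and routing keys below are hypothetical, and the #route{} and #binding{} records are the ones rabbit_router.erl already pulls in from rabbit.hrl:

-module(route_select_sketch).
-include("rabbit.hrl").
-export([example/0]).

%% Issues the same one-pass ets:select/2 as find_routes/2, here for the
%% multi-key case with routing keys <<"a">> and <<"b">> on a hypothetical
%% exchange in the default vhost.
example() ->
    SrcName   = rabbit_misc:r(<<"/">>, exchange, <<"my.exchange">>),
    MatchHead = #route{binding = #binding{source      = SrcName,
                                          destination = '$1',
                                          key         = '$2',
                                          _           = '_'}},
    %% a single 'orelse' guard covering all routing keys, exactly as
    %% match_routing_key/2 builds it
    Condition = {'orelse', {'=:=', '$2', <<"a">>}, {'=:=', '$2', <<"b">>}},
    %% the destinations ('$1') come straight from the local ram copy of
    %% rabbit_route, avoiding the mnesia:dirty_select/2 bookkeeping
    %% described in the comment above
    ets:select(rabbit_route, [{MatchHead, [Condition], ['$1']}]).
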
-- cgit v1.2.1 From 0a4c43d10e88004e3912a91ee7be8efa8e50edc7 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 3 Jun 2011 15:23:53 +0100 Subject: More consitent exception handling --- src/rabbit_control.erl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 5f8d9f97..be1c08d8 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -343,9 +343,9 @@ action(list_permissions, Node, [], Opts, Inform) -> action(report, Node, _Args, _Opts, Inform) -> io:format("Reporting server status on ~p~n~n", [erlang:universaltime()]), [begin ok = action(Action, N, [], [], Inform), io:nl() end || - N <- rpc_call(Node, rabbit_mnesia, running_clustered_nodes, []), + N <- unsafe_rpc(Node, rabbit_mnesia, running_clustered_nodes, []), Action <- [status, cluster_status, environment]], - VHosts = rpc_call(Node, rabbit_vhost, list, []), + VHosts = unsafe_rpc(Node, rabbit_vhost, list, []), [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES], [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts], io:format("End of server status report~n"), @@ -428,11 +428,17 @@ display_list(Other) -> Other. display_call_result(Node, MFA) -> case call(Node, MFA) of - {badrpc, _} = Res -> Res; + {badrpc, _} = Res -> throw(Res); Res -> io:format("~p~n", [Res]), ok end. +unsafe_rpc(Node, Mod, Fun, Args) -> + case rpc_call(Node, Mod, Fun, Args) of + {badrpc, _} = Res -> throw(Res); + Normal -> Normal + end. + call(Node, {Mod, Fun, Args}) -> rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary/1, Args)). -- cgit v1.2.1 From 5a132aa4adae657eef9439279c6e2e3ea16a1ab0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 6 Jun 2011 15:17:12 +0100 Subject: Be more consistent about returning infos from rabbit_auth_backend_internal:list_*_permissions, fix rabbit_vhost:delete. Note that some of the info keys have changed to match those in mgmt, so we avoid a translation layer. --- src/rabbit_auth_backend_internal.erl | 67 +++++++++++++++++++----------------- src/rabbit_vhost.erl | 6 ++-- 2 files changed, 38 insertions(+), 35 deletions(-) diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index 7cbd5dca..2a42ff88 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -28,7 +28,9 @@ hash_password/1]). -export([set_permissions/5, clear_permissions/2, list_permissions/0, list_vhost_permissions/1, list_user_permissions/1, - list_user_vhost_permissions/2, vhost_perms_info_keys/0]). + list_user_vhost_permissions/2, perms_info_keys/0, + vhost_perms_info_keys/0, user_perms_info_keys/0, + user_vhost_perms_info_keys/0]). -include("rabbit_auth_backend_spec.hrl"). @@ -58,25 +60,23 @@ regexp(), regexp(), regexp()) -> 'ok'). -spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) -> 'ok'). --spec(list_permissions/0 :: - () -> [{rabbit_types:username(), rabbit_types:vhost(), - regexp(), regexp(), regexp()}]). +-spec(list_permissions/0 :: () -> rabbit_types:infos()). -spec(list_vhost_permissions/1 :: - (rabbit_types:vhost()) -> [{rabbit_types:username(), - regexp(), regexp(), regexp()}]). + (rabbit_types:vhost()) -> rabbit_types:infos()). -spec(list_user_permissions/1 :: - (rabbit_types:username()) -> [{rabbit_types:vhost(), - regexp(), regexp(), regexp()}]). + (rabbit_types:username()) -> rabbit_types:infos()). -spec(list_user_vhost_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost()) - -> [{regexp(), regexp(), regexp()}]). 
+ -> rabbit_types:infos()). +-spec(perms_info_keys/0 :: () -> rabbit_types:info_keys()). -spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()). - +-spec(user_perms_info_keys/0 :: () -> rabbit_types:info_keys()). +-spec(user_vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()). -endif. %%---------------------------------------------------------------------------- --define(PERMS_INFO_KEYS, [configure_perms, write_perms, read_perms]). +-define(PERMS_INFO_KEYS, [configure, write, read]). %% Implementation of rabbit_auth_backend @@ -286,35 +286,38 @@ clear_permissions(Username, VHostPath) -> virtual_host = VHostPath}}) end)). -vhost_perms_info_keys() -> [username] ++ ?PERMS_INFO_KEYS. +perms_info_keys() -> [user, vhost | ?PERMS_INFO_KEYS]. +vhost_perms_info_keys() -> [user | ?PERMS_INFO_KEYS]. +user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS]. +user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS. list_permissions() -> - [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || - {Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(match_user_vhost('_', '_'))]. + list_permissions(perms_info_keys(), match_user_vhost('_', '_')). list_vhost_permissions(VHostPath) -> - InfoKeys = vhost_perms_info_keys(), - [lists:zip(InfoKeys, [Username, ConfigurePerm, WritePerm, ReadPerm]) || - {Username, _, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(rabbit_vhost:with( - VHostPath, match_user_vhost('_', VHostPath)))]. + list_permissions( + vhost_perms_info_keys(), + rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))). list_user_permissions(Username) -> - [{VHostPath, ConfigurePerm, WritePerm, ReadPerm} || - {_, VHostPath, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(rabbit_misc:with_user( - Username, match_user_vhost(Username, '_')))]. + list_permissions( + user_perms_info_keys(), + rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))). list_user_vhost_permissions(Username, VHostPath) -> - [{ConfigurePerm, WritePerm, ReadPerm} || - {_, _, ConfigurePerm, WritePerm, ReadPerm} <- - list_permissions(rabbit_misc:with_user_and_vhost( - Username, VHostPath, - match_user_vhost(Username, VHostPath)))]. - -list_permissions(QueryThunk) -> - [{Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm} || + list_permissions( + user_vhost_perms_info_keys(), + rabbit_misc:with_user_and_vhost( + Username, VHostPath, match_user_vhost(Username, VHostPath))). + +filter_props(Keys, Props) -> [T || T = {K, _} <- Props, lists:member(K, Keys)]. 
+ +list_permissions(Keys, QueryThunk) -> + [filter_props(Keys, [{user, Username}, + {vhost, VHostPath}, + {configure, ConfigurePerm}, + {write, WritePerm}, + {read, ReadPerm}]) || #user_permission{user_vhost = #user_vhost{username = Username, virtual_host = VHostPath}, permission = #permission{ configure = ConfigurePerm, diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl index 5270d80b..08d6c99a 100644 --- a/src/rabbit_vhost.erl +++ b/src/rabbit_vhost.erl @@ -91,9 +91,9 @@ delete(VHostPath) -> internal_delete(VHostPath) -> lists:foreach( - fun ({Username, _, _, _}) -> - ok = rabbit_auth_backend_internal:clear_permissions(Username, - VHostPath) + fun (Info) -> + ok = rabbit_auth_backend_internal:clear_permissions( + proplists:get_value(user, Info), VHostPath) end, rabbit_auth_backend_internal:list_vhost_permissions(VHostPath)), ok = mnesia:delete({rabbit_vhost, VHostPath}), -- cgit v1.2.1 From 8dac4b06c6f2f7be15d12f4e6e764fd25c5bf487 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 6 Jun 2011 15:37:48 +0100 Subject: Ahem --- src/rabbit_control.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 1fef76ee..355ac549 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -262,8 +262,9 @@ action(list_vhosts, Node, Args, _Opts, Inform) -> action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) -> Inform("Listing permissions for user ~p", Args), - display_list(call(Node, {rabbit_auth_backend_internal, - list_user_permissions, Args})); + display_info_list(call(Node, {rabbit_auth_backend_internal, + list_user_permissions, Args}), + rabbit_auth_backend_internal:user_perms_info_keys()); action(list_queues, Node, Args, Opts, Inform) -> Inform("Listing queues", []), -- cgit v1.2.1 From 9cd09fb24f30b5d2c48497094834f5e697b97803 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 6 Jun 2011 15:51:17 +0100 Subject: Explain what we list by default. --- docs/rabbitmqctl.1.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index b8d6fbd3..06dcfff7 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -648,6 +648,10 @@ Whether tracing is enabled for this virtual host. + + If no vhostinfoitems are specified + then the vhost name is displayed. + For example: rabbitmqctl list_vhosts name tracing -- cgit v1.2.1 From 27de37f3523af36cc2cb88fcb0955ed5cdd7872f Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 7 Jun 2011 11:43:39 +0100 Subject: erl_call treats all communication with the node as success, so try to emulate it. --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a347689b..7ca68f20 100644 --- a/Makefile +++ b/Makefile @@ -161,8 +161,11 @@ run-node: all RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \ ./scripts/rabbitmq-server +# erl_call treats all communication with the node as success, so we +# have to emulate it. run-tests: all - echo "rabbit_tests:all_tests()." | $(ERL_CALL) + erl -sname foo -noinput -eval \ + "case rpc:call(rabbit@$(shell hostname -s), rabbit_tests, all_tests, []) of passed -> halt(0); E -> io:format(\"~n~p~n~n\", [E]), halt(1) end." 
start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ -- cgit v1.2.1 From a29ef94024f2792f44cd54f11e1f2fde148f122c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 7 Jun 2011 12:24:58 +0100 Subject: OK, so that approach does not work, due to the node started by erl somehow imposing some of its configuration (in particular SASL) on the tests call, and making the log tests fail. Let's just parse the output... --- Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 7ca68f20..ea460f12 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml)) WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml) USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML))) +TMP_TEST_OUT=/tmp/rabbitmq-server-test-output ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes) PYTHON=python @@ -161,11 +162,11 @@ run-node: all RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \ ./scripts/rabbitmq-server -# erl_call treats all communication with the node as success, so we -# have to emulate it. run-tests: all - erl -sname foo -noinput -eval \ - "case rpc:call(rabbit@$(shell hostname -s), rabbit_tests, all_tests, []) of passed -> halt(0); E -> io:format(\"~n~p~n~n\", [E]), halt(1) end." + echo "rabbit_tests:all_tests()." | $(ERL_CALL) | \ + tee $(TMP_TEST_OUT) | tail -n 1 | \ + grep '^{ok, passed}$$' > /dev/null && rm $(TMP_TEST_OUT) || \ + (cat $(TMP_TEST_OUT) && echo && rm $(TMP_TEST_OUT) && false) start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ -- cgit v1.2.1 From e60cc6b8d7050c924609a24b7e8ad699382cf2b8 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 7 Jun 2011 13:10:35 +0100 Subject: Another version, avoiding a temp var --- Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index ea460f12..d8ef058e 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,6 @@ MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml)) WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml) USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML))) -TMP_TEST_OUT=/tmp/rabbitmq-server-test-output ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes) PYTHON=python @@ -163,10 +162,8 @@ run-node: all ./scripts/rabbitmq-server run-tests: all - echo "rabbit_tests:all_tests()." | $(ERL_CALL) | \ - tee $(TMP_TEST_OUT) | tail -n 1 | \ - grep '^{ok, passed}$$' > /dev/null && rm $(TMP_TEST_OUT) || \ - (cat $(TMP_TEST_OUT) && echo && rm $(TMP_TEST_OUT) && false) + OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \ + echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ -- cgit v1.2.1 From c3f40599ea9372bfff768adb8102a3c415d13910 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 9 Jun 2011 12:13:40 +0100 Subject: Make the stats interval configurable. 
--- ebin/rabbit_app.in | 1 + include/rabbit.hrl | 1 - src/rabbit_event.erl | 12 +++++++----- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 7dabb8c3..0afd7da6 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -33,6 +33,7 @@ {cluster_nodes, []}, {server_properties, []}, {collect_statistics, none}, + {collect_statistics_interval, 5000}, {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, diff --git a/include/rabbit.hrl b/include/rabbit.hrl index db4773b8..67e2dfe5 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -86,7 +86,6 @@ -define(HIBERNATE_AFTER_MIN, 1000). -define(DESIRED_HIBERNATE, 10000). --define(STATS_INTERVAL, 5000). -define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]). -define(DELETED_HEADER, <<"BCC">>). diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 9ed532db..468f9293 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -26,7 +26,7 @@ %%---------------------------------------------------------------------------- --record(state, {level, timer}). +-record(state, {level, interval, timer}). %%---------------------------------------------------------------------------- @@ -49,6 +49,7 @@ -opaque(state() :: #state { level :: level(), + interval :: integer(), timer :: atom() }). @@ -95,13 +96,14 @@ start_link() -> init_stats_timer() -> {ok, StatsLevel} = application:get_env(rabbit, collect_statistics), - #state{level = StatsLevel, timer = undefined}. + {ok, Interval} = application:get_env(rabbit, collect_statistics_interval), + #state{level = StatsLevel, interval = Interval, timer = undefined}. ensure_stats_timer(State = #state{level = none}, _Fun) -> State; -ensure_stats_timer(State = #state{timer = undefined}, Fun) -> - {ok, TRef} = timer:apply_after(?STATS_INTERVAL, - erlang, apply, [Fun, []]), +ensure_stats_timer(State = #state{interval = Interval, + timer = undefined}, Fun) -> + {ok, TRef} = timer:apply_after(Interval, erlang, apply, [Fun, []]), State#state{timer = TRef}; ensure_stats_timer(State, _Fun) -> State. -- cgit v1.2.1 From 30de0b6a35945cab02e95c5ab627ee5e61874b8d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 9 Jun 2011 14:15:17 +0100 Subject: First pass at user tags. --- docs/rabbitmqctl.1.xml | 33 +++++++----------- ebin/rabbit_app.in | 2 +- include/rabbit.hrl | 4 +-- src/rabbit.erl | 7 ++-- src/rabbit_auth_backend_internal.erl | 66 +++++++++++++++++++----------------- src/rabbit_control.erl | 27 +++++---------- src/rabbit_tests.erl | 8 ++--- src/rabbit_types.erl | 4 +-- src/rabbit_upgrade_functions.erl | 12 +++++++ 9 files changed, 77 insertions(+), 86 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 06dcfff7..a0f03192 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -513,17 +513,22 @@ - set_admin username + set_user_tags username tag ... username - The name of the user whose administrative - status is to be set. + The name of the user whose tags are to + be set. + + + tag + Zero, one or more tags to set. Any + existing tags will be removed. For example: - rabbitmqctl set_admin tonyg + rabbitmqctl set_user_tags tonyg administrator This command instructs the RabbitMQ broker to ensure the user named tonyg is an administrator. This has no @@ -532,24 +537,10 @@ user logs in via some other means (for example with the management plugin). 
- - - - - clear_admin username - - - - username - The name of the user whose administrative - status is to be cleared. - - - For example: - rabbitmqctl clear_admin tonyg + rabbitmqctl set_user_tags tonyg - This command instructs the RabbitMQ broker to ensure the user - named tonyg is not an administrator. + This command instructs the RabbitMQ broker to remove any + tags from the user named tonyg. diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 7dabb8c3..6d939c59 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -27,7 +27,7 @@ {queue_index_max_journal_entries, 262144}, {default_user, <<"guest">>}, {default_pass, <<"guest">>}, - {default_user_is_admin, true}, + {default_user_tags, [administrator]}, {default_vhost, <<"/">>}, {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, {cluster_nodes, []}, diff --git a/include/rabbit.hrl b/include/rabbit.hrl index db4773b8..ee6e67b6 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -15,12 +15,12 @@ %% -record(user, {username, - is_admin, + tags, auth_backend, %% Module this user came from impl %% Scratch space for that module }). --record(internal_user, {username, password_hash, is_admin}). +-record(internal_user, {username, password_hash, tags}). -record(permission, {configure, write, read}). -record(user_vhost, {username, virtual_host}). -record(user_permission, {user_vhost, permission}). diff --git a/src/rabbit.erl b/src/rabbit.erl index 8866a1b7..100cacb0 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -486,16 +486,13 @@ maybe_insert_default_data() -> insert_default_data() -> {ok, DefaultUser} = application:get_env(default_user), {ok, DefaultPass} = application:get_env(default_pass), - {ok, DefaultAdmin} = application:get_env(default_user_is_admin), + {ok, DefaultTags} = application:get_env(default_user_tags), {ok, DefaultVHost} = application:get_env(default_vhost), {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} = application:get_env(default_permissions), ok = rabbit_vhost:add(DefaultVHost), ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass), - case DefaultAdmin of - true -> rabbit_auth_backend_internal:set_admin(DefaultUser); - _ -> ok - end, + ok = rabbit_auth_backend_internal:set_tags(DefaultUser, DefaultTags), ok = rabbit_auth_backend_internal:set_permissions(DefaultUser, DefaultVHost, DefaultConfigurePerm, DefaultWritePerm, diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index 2a42ff88..96ada603 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -22,8 +22,8 @@ -export([description/0]). -export([check_user_login/2, check_vhost_access/3, check_resource_access/3]). --export([add_user/2, delete_user/1, change_password/2, set_admin/1, - clear_admin/1, list_users/0, lookup_user/1, clear_password/1]). +-export([add_user/2, delete_user/1, change_password/2, set_tags/2, + list_users/0, user_info_keys/0, lookup_user/1, clear_password/1]). -export([make_salt/0, check_password/2, change_password_hash/2, hash_password/1]). -export([set_permissions/5, clear_permissions/2, @@ -50,9 +50,9 @@ rabbit_types:password_hash()) -> 'ok'). -spec(hash_password/1 :: (rabbit_types:password()) -> rabbit_types:password_hash()). --spec(set_admin/1 :: (rabbit_types:username()) -> 'ok'). --spec(clear_admin/1 :: (rabbit_types:username()) -> 'ok'). --spec(list_users/0 :: () -> [{rabbit_types:username(), boolean()}]). +-spec(set_tags/2 :: (rabbit_types:username(), [atom()]) -> 'ok'). 
+-spec(list_users/0 :: () -> rabbit_types:infos()). +-spec(user_info_keys/0 :: () -> rabbit_types:info_keys()). -spec(lookup_user/1 :: (rabbit_types:username()) -> rabbit_types:ok(rabbit_types:internal_user()) | rabbit_types:error('not_found')). @@ -77,6 +77,7 @@ %%---------------------------------------------------------------------------- -define(PERMS_INFO_KEYS, [configure, write, read]). +-define(USER_INFO_KEYS, [user, tags]). %% Implementation of rabbit_auth_backend @@ -97,10 +98,10 @@ check_user_login(Username, AuthProps) -> internal_check_user_login(Username, Fun) -> Refused = {refused, "user '~s' - invalid credentials", [Username]}, case lookup_user(Username) of - {ok, User = #internal_user{is_admin = IsAdmin}} -> + {ok, User = #internal_user{tags = Tags}} -> case Fun(User) of true -> {ok, #user{username = Username, - is_admin = IsAdmin, + tags = Tags, auth_backend = ?MODULE, impl = User}}; _ -> Refused @@ -109,20 +110,23 @@ internal_check_user_login(Username, Fun) -> Refused end. -check_vhost_access(#user{is_admin = true}, _VHostPath, read) -> - true; - -check_vhost_access(#user{username = Username}, VHostPath, _) -> - %% TODO: use dirty ops instead - rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:read({rabbit_user_permission, - #user_vhost{username = Username, - virtual_host = VHostPath}}) of - [] -> false; - [_R] -> true - end - end). +check_vhost_access(#user{username = Username, tags = Tags}, VHost, Mode) -> + Admin = lists:any(fun(T) -> lists:member(T, [administrator]) end, Tags), + case {Admin, Mode} of + {true, read} -> + true; + _ -> + %% TODO: use dirty ops instead + rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:read({rabbit_user_permission, + #user_vhost{username = Username, + virtual_host = VHost}}) of + [] -> false; + [_R] -> true + end + end) + end. check_resource_access(#user{username = Username}, #resource{virtual_host = VHostPath, name = Name}, @@ -161,7 +165,7 @@ add_user(Username, Password) -> #internal_user{username = Username, password_hash = hash_password(Password), - is_admin = false}, + tags = []}, write); _ -> mnesia:abort({user_already_exists, Username}) @@ -222,16 +226,12 @@ salted_md5(Salt, Cleartext) -> Salted = <>, erlang:md5(Salted). -set_admin(Username) -> set_admin(Username, true). - -clear_admin(Username) -> set_admin(Username, false). - -set_admin(Username, IsAdmin) -> +set_tags(Username, Tags) -> R = update_user(Username, fun(User) -> - User#internal_user{is_admin = IsAdmin} + User#internal_user{tags = Tags} end), - rabbit_log:info("Set user admin flag for user ~p to ~p~n", - [Username, IsAdmin]), + rabbit_log:info("Set user tags for user ~p to ~p~n", + [Username, Tags]), R. update_user(Username, Fun) -> @@ -244,10 +244,12 @@ update_user(Username, Fun) -> end)). list_users() -> - [{Username, IsAdmin} || - #internal_user{username = Username, is_admin = IsAdmin} <- + [[{user, Username}, {tags, Tags}] || + #internal_user{username = Username, tags = Tags} <- mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})]. +user_info_keys() -> ?USER_INFO_KEYS. + lookup_user(Username) -> rabbit_misc:dirty_read({rabbit_user, Username}). 
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 355ac549..9eef384a 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -235,17 +235,17 @@ action(clear_password, Node, Args = [Username], _Opts, Inform) -> Inform("Clearing password for user ~p", [Username]), call(Node, {rabbit_auth_backend_internal, clear_password, Args}); -action(set_admin, Node, [Username], _Opts, Inform) -> - Inform("Setting administrative status for user ~p", [Username]), - call(Node, {rabbit_auth_backend_internal, set_admin, [Username]}); - -action(clear_admin, Node, [Username], _Opts, Inform) -> - Inform("Clearing administrative status for user ~p", [Username]), - call(Node, {rabbit_auth_backend_internal, clear_admin, [Username]}); +action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) -> + Tags = [list_to_atom(T) || T <- TagsStr], + Inform("Setting tags for user ~p to ~p", [Username, Tags]), + rpc_call(Node, rabbit_auth_backend_internal, set_tags, + [list_to_binary(Username), Tags]); action(list_users, Node, [], _Opts, Inform) -> Inform("Listing users", []), - display_list(call(Node, {rabbit_auth_backend_internal, list_users, []})); + display_info_list( + call(Node, {rabbit_auth_backend_internal, list_users, []}), + rabbit_auth_backend_internal:user_info_keys()); action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) -> Inform("Creating vhost ~p", Args), @@ -422,17 +422,6 @@ format_info_item([T | _] = Value) format_info_item(Value) -> io_lib:format("~w", [Value]). -display_list(L) when is_list(L) -> - lists:foreach(fun (I) when is_binary(I) -> - io:format("~s~n", [escape(I)]); - (I) when is_tuple(I) -> - display_row([escape(V) - || V <- tuple_to_list(I)]) - end, - lists:sort(L)), - ok; -display_list(Other) -> Other. - display_call_result(Node, MFA) -> case call(Node, MFA) of {badrpc, _} = Res -> throw(Res); diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 3f4aa54e..b71d17d0 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1078,8 +1078,8 @@ test_user_management() -> {error, {user_already_exists, _}} = control_action(add_user, ["foo", "bar"]), ok = control_action(change_password, ["foo", "baz"]), - ok = control_action(set_admin, ["foo"]), - ok = control_action(clear_admin, ["foo"]), + ok = control_action(set_tags, ["foo", "administrator"]), + ok = control_action(set_tags, ["foo"]), ok = control_action(list_users, []), %% vhost creation @@ -1203,10 +1203,10 @@ test_spawn() -> user(Username) -> #user{username = Username, - is_admin = true, + tags = [administrator], auth_backend = rabbit_auth_backend_internal, impl = #internal_user{username = Username, - is_admin = true}}. + tags = [administrator]}}. test_statistics_event_receiver(Pid) -> receive diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 1f0f8bbe..a18118e3 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -139,14 +139,14 @@ -type(user() :: #user{username :: username(), - is_admin :: boolean(), + tags :: [atom()], auth_backend :: atom(), impl :: any()}). -type(internal_user() :: #internal_user{username :: username(), password_hash :: password_hash(), - is_admin :: boolean()}). + tags :: [atom()]}). -type(username() :: binary()). -type(password() :: binary()). diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 5e4a1224..18cdc83c 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -29,6 +29,7 @@ -rabbit_upgrade({semi_durable_route, mnesia, []}). 
-rabbit_upgrade({exchange_event_serial, mnesia, []}). -rabbit_upgrade({trace_exchanges, mnesia, []}). +-rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}). %% ------------------------------------------------------------------- @@ -43,6 +44,7 @@ -spec(semi_durable_route/0 :: () -> 'ok'). -spec(exchange_event_serial/0 :: () -> 'ok'). -spec(trace_exchanges/0 :: () -> 'ok'). +-spec(user_admin_to_tags/0 :: () -> 'ok'). -endif. @@ -121,6 +123,16 @@ trace_exchanges() -> VHost <- rabbit_vhost:list()], ok. +user_admin_to_tags() -> + transform( + rabbit_user, + fun({internal_user, Username, PasswordHash, true}) -> + {internal_user, Username, PasswordHash, [administrator]}; + ({internal_user, Username, PasswordHash, false}) -> + {internal_user, Username, PasswordHash, []} + end, + [username, password_hash, tags], internal_user). + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 51fb33b6a5814c55872ead051be665ad4a61ac8e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 9 Jun 2011 15:30:06 +0100 Subject: Since we're changing the backend API anyway, let's remove this management-specific wart. --- include/rabbit_auth_backend_spec.hrl | 3 +-- src/rabbit_access_control.erl | 25 ++----------------------- src/rabbit_auth_backend.erl | 8 ++------ src/rabbit_auth_backend_internal.erl | 30 ++++++++++++------------------ 4 files changed, 17 insertions(+), 49 deletions(-) diff --git a/include/rabbit_auth_backend_spec.hrl b/include/rabbit_auth_backend_spec.hrl index e26d44ea..803bb75c 100644 --- a/include/rabbit_auth_backend_spec.hrl +++ b/include/rabbit_auth_backend_spec.hrl @@ -22,8 +22,7 @@ {'ok', rabbit_types:user()} | {'refused', string(), [any()]} | {'error', any()}). --spec(check_vhost_access/3 :: (rabbit_types:user(), rabbit_types:vhost(), - rabbit_access_control:vhost_permission_atom()) -> +-spec(check_vhost_access/2 :: (rabbit_types:user(), rabbit_types:vhost()) -> boolean() | {'error', any()}). -spec(check_resource_access/3 :: (rabbit_types:user(), rabbit_types:r(atom()), diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl index 59c00848..7fce7bd0 100644 --- a/src/rabbit_access_control.erl +++ b/src/rabbit_access_control.erl @@ -19,16 +19,15 @@ -include("rabbit.hrl"). -export([check_user_pass_login/2, check_user_login/2, - check_vhost_access/2, check_resource_access/3, list_vhosts/2]). + check_vhost_access/2, check_resource_access/3]). %%---------------------------------------------------------------------------- -ifdef(use_specs). --export_type([permission_atom/0, vhost_permission_atom/0]). +-export_type([permission_atom/0]). -type(permission_atom() :: 'configure' | 'read' | 'write'). --type(vhost_permission_atom() :: 'read' | 'write'). -spec(check_user_pass_login/2 :: (rabbit_types:username(), rabbit_types:password()) @@ -39,8 +38,6 @@ -spec(check_resource_access/3 :: (rabbit_types:user(), rabbit_types:r(atom()), permission_atom()) -> 'ok' | rabbit_types:channel_exit()). --spec(list_vhosts/2 :: (rabbit_types:user(), vhost_permission_atom()) - -> [rabbit_types:vhost()]). -endif. @@ -104,21 +101,3 @@ check_access(Fun, ErrStr, ErrArgs, RefStr, RefArgs) -> false -> rabbit_misc:protocol_error(access_refused, RefStr, RefArgs) end. 
- -%% Permission = write -> log in -%% Permission = read -> learn of the existence of (only relevant for -%% management plugin) -list_vhosts(User = #user{username = Username, auth_backend = Module}, - Permission) -> - lists:filter( - fun(VHost) -> - case Module:check_vhost_access(User, VHost, Permission) of - {error, _} = E -> - rabbit_log:warning("~w failed checking vhost access " - "to ~s for ~s: ~p~n", - [Module, VHost, Username, E]), - false; - Else -> - Else - end - end, rabbit_vhost:list()). diff --git a/src/rabbit_auth_backend.erl b/src/rabbit_auth_backend.erl index 09820c5b..ade158bb 100644 --- a/src/rabbit_auth_backend.erl +++ b/src/rabbit_auth_backend.erl @@ -36,17 +36,13 @@ behaviour_info(callbacks) -> %% Client failed authentication. Log and die. {check_user_login, 2}, - %% Given #user, vhost path and permission, can a user access a vhost? - %% Permission is read - learn of the existence of (only relevant for - %% management plugin) - %% or write - log in - %% + %% Given #user and vhost, can a user log in to a vhost? %% Possible responses: %% true %% false %% {error, Error} %% Something went wrong. Log and die. - {check_vhost_access, 3}, + {check_vhost_access, 2}, %% Given #user, resource and permission, can a user access a resource? %% diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl index 96ada603..6a018bd1 100644 --- a/src/rabbit_auth_backend_internal.erl +++ b/src/rabbit_auth_backend_internal.erl @@ -20,7 +20,7 @@ -behaviour(rabbit_auth_backend). -export([description/0]). --export([check_user_login/2, check_vhost_access/3, check_resource_access/3]). +-export([check_user_login/2, check_vhost_access/2, check_resource_access/3]). -export([add_user/2, delete_user/1, change_password/2, set_tags/2, list_users/0, user_info_keys/0, lookup_user/1, clear_password/1]). @@ -110,23 +110,17 @@ internal_check_user_login(Username, Fun) -> Refused end. -check_vhost_access(#user{username = Username, tags = Tags}, VHost, Mode) -> - Admin = lists:any(fun(T) -> lists:member(T, [administrator]) end, Tags), - case {Admin, Mode} of - {true, read} -> - true; - _ -> - %% TODO: use dirty ops instead - rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:read({rabbit_user_permission, - #user_vhost{username = Username, - virtual_host = VHost}}) of - [] -> false; - [_R] -> true - end - end) - end. +check_vhost_access(#user{username = Username}, VHost) -> + %% TODO: use dirty ops instead + rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:read({rabbit_user_permission, + #user_vhost{username = Username, + virtual_host = VHost}}) of + [] -> false; + [_R] -> true + end + end). 
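%% (Illustrative aside, not part of this patch.) With the read/write
%% distinction gone, check_vhost_access/2 is a plain yes-or-no question per
%% vhost, so an alternative authentication backend's callback can be very
%% small. A hypothetical sketch -- the policy, user name and vhost below
%% are invented:
%%
%%   check_vhost_access(#user{username = Username}, VHost) ->
%%       VHost =:= <<"/">> orelse Username =:= <<"admin">>.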
check_resource_access(#user{username = Username}, #resource{virtual_host = VHostPath, name = Name}, -- cgit v1.2.1 From a627c21ee80d9e49a655aab0ef24d65103dfc404 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 9 Jun 2011 15:53:49 +0100 Subject: Oops --- src/rabbit_access_control.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl index 7fce7bd0..c0ae18c0 100644 --- a/src/rabbit_access_control.erl +++ b/src/rabbit_access_control.erl @@ -67,7 +67,7 @@ check_vhost_access(User = #user{ username = Username, check_access( fun() -> rabbit_vhost:exists(VHostPath) andalso - Module:check_vhost_access(User, VHostPath, write) + Module:check_vhost_access(User, VHostPath) end, "~s failed checking vhost access to ~s for ~s: ~p~n", [Module, VHostPath, Username], -- cgit v1.2.1 From 5510d8e28617c475104e45c90a076f136c09f103 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 9 Jun 2011 16:43:17 +0100 Subject: Elimination of trivial and pointless abstraction --- src/rabbit_amqqueue_process.erl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 07a24af8..1e5ad349 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -431,9 +431,7 @@ confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) -> {CMs, MTC0} end end, {gb_trees:empty(), MTC}, MsgIds), - gb_trees_foreach(fun(ChPid, MsgSeqNos) -> - rabbit_channel:confirm(ChPid, MsgSeqNos) - end, CMs), + gb_trees_foreach(fun rabbit_channel:confirm/2, CMs), State#q{msg_id_to_channel = MTC1}. gb_trees_foreach(_, none) -> -- cgit v1.2.1 From 96aadc11e34fd219a20f8246b98631397451fcaf Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 10 Jun 2011 10:33:17 +0100 Subject: Improve (and, err, fix) tests --- src/rabbit_tests.erl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b71d17d0..f5e6eef7 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1078,8 +1078,15 @@ test_user_management() -> {error, {user_already_exists, _}} = control_action(add_user, ["foo", "bar"]), ok = control_action(change_password, ["foo", "baz"]), - ok = control_action(set_tags, ["foo", "administrator"]), - ok = control_action(set_tags, ["foo"]), + ok = control_action(set_user_tags, ["foo", "foo", "bar", "bash"]), + {ok, #internal_user{tags = [foo, bar, bash]}} = + rabbit_auth_backend_internal:lookup_user(<<"foo">>), + ok = control_action(set_user_tags, ["foo", "administrator"]), + {ok, #internal_user{tags = [administrator]}} = + rabbit_auth_backend_internal:lookup_user(<<"foo">>), + ok = control_action(set_user_tags, ["foo"]), + {ok, #internal_user{tags = []}} = + rabbit_auth_backend_internal:lookup_user(<<"foo">>), ok = control_action(list_users, []), %% vhost creation -- cgit v1.2.1 From db15d108e7ca9194f20812409f7a22c4a2b85d94 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 10 Jun 2011 12:03:34 +0100 Subject: Further improve tests --- src/rabbit_tests.erl | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index f5e6eef7..65c5ca50 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1072,22 +1072,25 @@ test_user_management() -> control_action(list_permissions, [], [{"-p", "/testhost"}]), {error, {invalid_regexp, _, _}} = control_action(set_permissions, ["guest", "+foo", ".*", ".*"]), 
+ {error, {no_such_user, _}} = + control_action(set_user_tags, ["foo", "bar"]), %% user creation ok = control_action(add_user, ["foo", "bar"]), {error, {user_already_exists, _}} = control_action(add_user, ["foo", "bar"]), ok = control_action(change_password, ["foo", "baz"]), - ok = control_action(set_user_tags, ["foo", "foo", "bar", "bash"]), - {ok, #internal_user{tags = [foo, bar, bash]}} = - rabbit_auth_backend_internal:lookup_user(<<"foo">>), - ok = control_action(set_user_tags, ["foo", "administrator"]), - {ok, #internal_user{tags = [administrator]}} = - rabbit_auth_backend_internal:lookup_user(<<"foo">>), - ok = control_action(set_user_tags, ["foo"]), - {ok, #internal_user{tags = []}} = - rabbit_auth_backend_internal:lookup_user(<<"foo">>), - ok = control_action(list_users, []), + + TestTags = fun (Tags) -> + Args = ["foo" | [atom_to_list(T) || T <- Tags]], + ok = control_action(set_user_tags, Args), + {ok, #internal_user{tags = Tags}} = + rabbit_auth_backend_internal:lookup_user(<<"foo">>), + ok = control_action(list_users, []) + end, + TestTags([foo, bar, bash]), + TestTags([administrator]), + TestTags([]), %% vhost creation ok = control_action(add_vhost, ["/testhost"]), -- cgit v1.2.1 From e9e755b97a1c8a49a90c4ec074b36db4ed06bf0a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 10 Jun 2011 12:23:24 +0100 Subject: Yes, this is really what the QA process has come to. --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 65c5ca50..f5492cdc 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1088,7 +1088,7 @@ test_user_management() -> rabbit_auth_backend_internal:lookup_user(<<"foo">>), ok = control_action(list_users, []) end, - TestTags([foo, bar, bash]), + TestTags([foo, bar, baz]), TestTags([administrator]), TestTags([]), -- cgit v1.2.1 From 146accf59cae03cc580eb05326bdc457ff4c4b46 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 13 Jun 2011 14:51:16 +0100 Subject: Convert non-administrators to "management-user" (see bug 23664) --- src/rabbit_upgrade_functions.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 18cdc83c..0106f034 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -129,7 +129,7 @@ user_admin_to_tags() -> fun({internal_user, Username, PasswordHash, true}) -> {internal_user, Username, PasswordHash, [administrator]}; ({internal_user, Username, PasswordHash, false}) -> - {internal_user, Username, PasswordHash, []} + {internal_user, Username, PasswordHash, ['management-user']} end, [username, password_hash, tags], internal_user). 
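For illustration (the username below is hypothetical), this upgrade rewrites each stored rabbit_user row from the old attribute layout [username, password_hash, is_admin] to the new [username, password_hash, tags], e.g.

    %% before the upgrade:
    {internal_user, <<"monitor">>, PasswordHash, false}
    %% after the upgrade ('management-user' is renamed to plain 'management'
    %% by a later commit in this series):
    {internal_user, <<"monitor">>, PasswordHash, ['management-user']}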
-- cgit v1.2.1 From 2c1dfeb271deb99d0e6299accddb922b4e6cc585 Mon Sep 17 00:00:00 2001 From: Jerry Kuch Date: Tue, 14 Jun 2011 05:53:10 -0700 Subject: Package changelog mods for 2.5.0 --- packaging/RPMS/Fedora/rabbitmq-server.spec | 3 +++ packaging/debs/Debian/debian/changelog | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec index f9e9df8b..ffc826eb 100644 --- a/packaging/RPMS/Fedora/rabbitmq-server.spec +++ b/packaging/RPMS/Fedora/rabbitmq-server.spec @@ -120,6 +120,9 @@ done rm -rf %{buildroot} %changelog +* Thu Jun 9 2011 jerryk@vmware.com 2.5.0-1 +- New Upstream Release + * Thu Apr 7 2011 Alexandru Scvortov 2.4.1-1 - New Upstream Release diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog index 0383b955..1cab4235 100644 --- a/packaging/debs/Debian/debian/changelog +++ b/packaging/debs/Debian/debian/changelog @@ -1,3 +1,9 @@ +rabbitmq-server (2.5.0-1) lucid; urgency=low + + * New Upstream Release + + -- Thu, 09 Jun 2011 07:20:29 -0700 + rabbitmq-server (2.4.1-1) lucid; urgency=low * New Upstream Release -- cgit v1.2.1 -- cgit v1.2.1 From 6154fbdfb428a524640a361fd99d79a1f04f0f8d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 16 Jun 2011 14:32:02 +0100 Subject: Matthias points out that these pertain to the old persister which we no longer ship. --- ebin/rabbit_app.in | 2 -- 1 file changed, 2 deletions(-) diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 0afd7da6..d06f167e 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -21,8 +21,6 @@ {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, {frame_max, 131072}, - {persister_max_wrap_entries, 500}, - {persister_hibernate_after, 10000}, {msg_store_file_size_limit, 16777216}, {queue_index_max_journal_entries, 262144}, {default_user, <<"guest">>}, -- cgit v1.2.1 From 9b9001f19ba4010fcc478750d94cc4740046524d Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 17 Jun 2011 13:35:44 +0100 Subject: It's now 'management'. --- src/rabbit_upgrade_functions.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 0106f034..c2dd860a 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -129,7 +129,7 @@ user_admin_to_tags() -> fun({internal_user, Username, PasswordHash, true}) -> {internal_user, Username, PasswordHash, [administrator]}; ({internal_user, Username, PasswordHash, false}) -> - {internal_user, Username, PasswordHash, ['management-user']} + {internal_user, Username, PasswordHash, [management]} end, [username, password_hash, tags], internal_user). -- cgit v1.2.1 From 8c55cc1fdaebaee2f2adc90453b36e2590686cdc Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 17 Jun 2011 15:53:37 +0100 Subject: Allow unprivileged users to passive declare. 
--- src/rabbit_channel.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 991b0b06..49b61600 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -894,7 +894,6 @@ handle_method(#'exchange.declare'{exchange = ExchangeNameBin, nowait = NoWait}, _, State = #ch{virtual_host = VHostPath}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), - check_configure_permitted(ExchangeName, State), check_not_default_exchange(ExchangeName), _ = rabbit_exchange:lookup_or_die(ExchangeName), return_ok(State, NoWait, #'exchange.declare_ok'{}); @@ -990,7 +989,6 @@ handle_method(#'queue.declare'{queue = QueueNameBin, _, State = #ch{virtual_host = VHostPath, conn_pid = ConnPid}) -> QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin), - check_configure_permitted(QueueName, State), {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} = rabbit_amqqueue:with_or_die( QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end), -- cgit v1.2.1 From 4b50006fe43430efc36cf84b4a553ff94937ef42 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 20 Jun 2011 15:47:04 +0100 Subject: Generate fine stats on redeliver. --- src/rabbit_channel.erl | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 991b0b06..51d844c9 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -286,12 +286,11 @@ handle_cast({deliver, ConsumerTag, AckRequired, exchange = ExchangeName#resource.name, routing_key = RoutingKey}, rabbit_writer:send_command_and_notify(WriterPid, QPid, self(), M, Content), - - maybe_incr_stats([{QPid, 1}], - case AckRequired of - true -> deliver; - false -> deliver_no_ack - end, State), + maybe_incr_stats([{QPid, 1}], case AckRequired of + true -> deliver; + false -> deliver_no_ack + end, State), + maybe_incr_redeliver(Redelivered, QPid, State), rabbit_trace:tap_trace_out(Msg, TraceState), noreply(State1#ch{next_tag = DeliveryTag + 1}); @@ -690,6 +689,7 @@ handle_method(#'basic.get'{queue = QueueNameBin, true -> get_no_ack; false -> get end, State), + maybe_incr_redeliver(Redelivered, QPid, State), rabbit_trace:tap_trace_out(Msg, TraceState), ok = rabbit_writer:send_command( WriterPid, @@ -1454,6 +1454,11 @@ i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) -> i(Item, _) -> throw({bad_argument, Item}). +maybe_incr_redeliver(true, QPid, State) -> + maybe_incr_stats([{QPid, 1}], redeliver, State); +maybe_incr_redeliver(_, _, _) -> + ok. + maybe_incr_stats(QXIncs, Measure, #ch{stats_timer = StatsTimer}) -> case rabbit_event:stats_level(StatsTimer) of fine -> [incr_stats(QX, Inc, Measure) || {QX, Inc} <- QXIncs]; -- cgit v1.2.1 From 2bbc2debb3732696ad3bb97356b8e505ddb1f7c2 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 20 Jun 2011 17:51:30 +0100 Subject: First draft of qc property for backing store --- src/rabbit_backing_queue_qc.erl | 181 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 src/rabbit_backing_queue_qc.erl diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl new file mode 100644 index 00000000..95832842 --- /dev/null +++ b/src/rabbit_backing_queue_qc.erl @@ -0,0 +1,181 @@ +%% The contents of this file are subject to the Mozilla Public License +%% Version 1.1 (the "License"); you may not use this file except in +%% compliance with the License. 
You may obtain a copy of the License +%% at http://www.mozilla.org/MPL/ +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and +%% limitations under the License. +%% +%% The Original Code is RabbitMQ. +%% +%% The Initial Developer of the Original Code is VMware, Inc. +%% Copyright (c) 2007-2011 VMware, Inc. All rights reserved. +%% + +-module(rabbit_backing_queue_qc). + +-behaviour(proper_statem). + +-export([initial_state/0, command/1, precondition/2, postcondition/3, + next_state/3]). + +-export([prop_backing_queue_test/0]). + + +-record(state, {bqstate, + messages, + acks}). + +-include("rabbit.hrl"). +-include("rabbit_framing.hrl"). +-include_lib("proper/include/proper.hrl"). + +initial_state() -> + VQ = qc_variable_queue_init(qc_test_queue()), + #state{bqstate=VQ, messages = [], acks = []}. + +prop_backing_queue_test() -> + ?FORALL(Cmds, commands(?MODULE, initial_state()), + begin + {_H, _S, Res} = run_commands(?MODULE, Cmds), + ?WHENFAIL( + io:format("Result: ~p~n", [Res]), + aggregate(command_names(Cmds), Res =:= ok)) + end). + +%% Commands + +command(S) -> + frequency([{5, qc_publish(S)}, + {3, qc_fetch(S)}, + {2, qc_ack(S)}, + {2, qc_requeue(S)}, + {1, qc_ram(S)}]). + +qc_publish(#state{bqstate = VQ}) -> + {call, rabbit_variable_queue, publish, + [qc_message(), #message_properties{}, self(), VQ]}. + +qc_fetch(#state{bqstate = VQ}) -> + {call, rabbit_variable_queue, fetch, [true, VQ]}. + +qc_ack(#state{bqstate = VQ, acks = Acks}) -> + {call, rabbit_variable_queue, ack, [sublist(Acks), VQ]}. + +qc_requeue(#state{bqstate = VQ, acks = Acks}) -> + {call, rabbit_variable_queue, requeue, + [sublist(Acks), fun(MsgOpts) -> MsgOpts end, VQ]}. + +qc_ram(#state{bqstate = VQ}) -> + {call, rabbit_variable_queue, set_ram_duration_target, + [oneof([0, infinity]), VQ]}. + +%% Preconditions + +precondition(_S, {call, rabbit_variable_queue, Fun, _Arg}) + when Fun =:= publish; Fun =:= fetch; Fun =:= set_ram_duration_target -> + true; +precondition(#state{acks = Acks}, {call, rabbit_variable_queue, Fun, _Arg}) + when Fun =:= ack; Fun =:= requeue -> + length(Acks) > 0. + +%% Next state + +next_state(S, VQ, {call, rabbit_variable_queue, publish, + [Msg, _MsgProps, _Pid, _VQ]}) -> + #state{messages = Messages} = S, + S#state{bqstate=VQ, messages= [Msg | Messages]}; + +next_state(S, Res, {call, rabbit_variable_queue, fetch, [AckReq, _VQ]}) -> + #state{messages = M, acks = Acks} = S, + ResultDetails = {call, erlang, element, [1, Res]}, + AckTag = {call, erlang, element, [3, ResultDetails]}, + VQ1 = {call, erlang, element, [2, Res]}, + S1 = S#state{bqstate = VQ1}, + case M of + [] -> S1; + [_|_] -> Msg = lists:last(M), + case AckReq of + true -> S1#state{messages = M -- [Msg], + acks = Acks ++ [AckTag]}; + false -> throw(non_ack_not_supported) + end + end; + +next_state(S, Res, {call, rabbit_variable_queue, ack, [AcksArg, _VQ]}) -> + #state{acks = AcksState} = S, + VQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = VQ1, + acks = AcksState -- AcksArg}; + +next_state(S, Res, {call, rabbit_variable_queue, requeue, [AcksArg, _F, _V]}) -> + #state{messages = Messages, acks = AcksState} = S, + VQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = VQ1, + messages = AcksArg ++ Messages, + acks = AcksState -- AcksArg}; + +next_state(S, VQ, {call, rabbit_variable_queue, set_ram_duration_target, _A}) -> + S#state{bqstate = VQ}. 
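%% (Illustrative aside, not part of this patch.) The property would be run
%% with PropEr from a node whose backing-queue environment (mnesia, the
%% msg_store and the rabbit application settings) is already in place,
%% along the lines of the following -- the iteration count is an arbitrary
%% choice for illustration:
%%
%%   true = proper:quickcheck(
%%            rabbit_backing_queue_qc:prop_backing_queue_test(),
%%            [{numtests, 100}]).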
+ +%% Postconditions + +postcondition(#state{bqstate = VQ, + messages = Messages}, + {call, _Mod, _Fun, _Args}, _Res) -> + rabbit_variable_queue:len(VQ) =:= length(Messages). + +%% Helpers + +qc_message_payload() -> + binary(). + +qc_routing_key() -> + noshrink(binary(10)). + +qc_delivery_mode() -> + oneof([1, 2]). + +qc_message() -> + qc_message(qc_delivery_mode()). + +qc_message(DeliveryMode) -> + {call, rabbit_basic, message, [ + qc_default_exchange(), + qc_routing_key(), + #'P_basic'{delivery_mode = DeliveryMode}, + qc_message_payload()]}. + +qc_default_exchange() -> + {call, rabbit_misc, r, [<<>>, exchange, <<>>]}. + +qc_variable_queue_init(Q) -> + {call, rabbit_variable_queue, init, + [Q, false, nop(2), nop(2), nop(2), nop(1)]}. + +qc_test_q() -> + {call, rabbit_misc, r, [<<"/">>, queue, noshrink(binary(16))]}. + +qc_test_queue() -> + qc_test_queue(boolean()). + +qc_test_queue(Durable) -> + #amqqueue{name = qc_test_q(), + durable = Durable, + auto_delete = false, + arguments = [], + pid = self()}. + +nop(N) -> function(N, ok). + +sublist(List) -> + case List of + [] -> []; + _ -> Item = lists:nth(random:uniform(length(List)), List), + case random:uniform(3) of + 1 -> [Item]; + _ -> [Item | sublist(List -- [Item])] + end + end. -- cgit v1.2.1 From 30f4103decf18fd4163aedccf35760eb19c4718a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 14:14:14 +0100 Subject: Rip out support for dynamically adding or removing queue mirrors --- docs/rabbitmqctl.1.xml | 43 ------------------------------------------- src/rabbit_control.erl | 12 ------------ 2 files changed, 55 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index cc3d4d2a..a0f03192 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1361,49 +1361,6 @@ - - - Mirrored Queue Management - - Mirrored queues can have slaves dynamically added, and slaves - or the master dynamically dropped. Refer to the High Availability - guide for further details about mirrored queues in - general. - - - - - add_queue_mirror queue_name node - - - Attempts to add a mirror of the queue - queue_name on - node. This will only succeed if the - queue was declared a mirrored queue and if there is no - mirror of the queue already on the node. If it succeeds, - the new mirror will start off as an empty slave. - - - - - - drop_queue_mirror queue_name node - - - Attempts to drop a mirror of the queue - queue_name on - node. This will only succeed if the - queue was declared a mirrored queue and if there is a - mirror of the queue already on the node. If the node - contains the master of the queue, a slave on some other - node will be promoted to become the new master. It is - not permitted to drop the only node of a mirrored-queue. 
- - - - - diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 9194a45b..9eef384a 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -274,18 +274,6 @@ action(list_queues, Node, Args, Opts, Inform) -> [VHostArg, ArgAtoms]), ArgAtoms); -action(add_queue_mirror, Node, [Queue, MirrorNode], Opts, Inform) -> - Inform("Adding mirror of queue ~p on node ~p~n", [Queue, MirrorNode]), - VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - rpc_call(Node, rabbit_mirror_queue_misc, add_slave, - [VHostArg, list_to_binary(Queue), list_to_atom(MirrorNode)]); - -action(drop_queue_mirror, Node, [Queue, MirrorNode], Opts, Inform) -> - Inform("Dropping mirror of queue ~p on node ~p~n", [Queue, MirrorNode]), - VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), - rpc_call(Node, rabbit_mirror_queue_misc, drop_slave, - [VHostArg, list_to_binary(Queue), list_to_atom(MirrorNode)]); - action(list_exchanges, Node, Args, Opts, Inform) -> Inform("Listing exchanges", []), VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)), -- cgit v1.2.1 From 90d6060a8a108609d45163792412c09d7ce25c72 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 16:22:48 +0100 Subject: Sort out (everywhere except mirror modules) slave pids and mirror nodes and deal with non-local queue declaration, and introduce ha policy and ha policy params. (does not compile) --- include/rabbit.hrl | 2 +- src/rabbit_amqqueue.erl | 76 +++++++++++++++++++++++++--------------- src/rabbit_amqqueue_process.erl | 21 ++++++----- src/rabbit_amqqueue_sup.erl | 6 ++-- src/rabbit_mirror_queue_misc.erl | 24 ++++++------- src/rabbit_router.erl | 4 +-- src/rabbit_types.erl | 3 +- src/rabbit_upgrade_functions.erl | 11 +++--- 8 files changed, 86 insertions(+), 61 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 0a202c5e..00b7e6e9 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -46,7 +46,7 @@ -record(exchange_serial, {name, next}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, - arguments, pid, mirror_pids}). + arguments, pid, slave_pids, mirror_nodes}). %% mnesia doesn't like unary records, so we add a dummy 'value' field -record(route, {binding, value = const}). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e5c53620..0b25a4e0 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -193,19 +193,21 @@ find_durable_queues() -> end). recover_durable_queues(DurableQueues) -> - Qs = [start_queue_process(Q) || Q <- DurableQueues], + Qs = [start_queue_process(node(), Q) || Q <- DurableQueues], [QName || Q = #amqqueue{name = QName, pid = Pid} <- Qs, gen_server2:call(Pid, {init, true}, infinity) == {new, Q}]. declare(QueueName, Durable, AutoDelete, Args, Owner) -> ok = check_declare_arguments(QueueName, Args), - Q = start_queue_process(#amqqueue{name = QueueName, - durable = Durable, - auto_delete = AutoDelete, - arguments = Args, - exclusive_owner = Owner, - pid = none, - mirror_pids = []}), + {Node, MNodes} = determine_queue_nodes(Args), + Q = start_queue_process(Node, #amqqueue{name = QueueName, + durable = Durable, + auto_delete = AutoDelete, + arguments = Args, + exclusive_owner = Owner, + pid = none, + slave_pids = [], + mirror_nodes = MNodes}), case gen_server2:call(Q#amqqueue.pid, {init, false}, infinity) of not_found -> rabbit_misc:not_found(QueueName); Q1 -> Q1 @@ -243,8 +245,25 @@ store_queue(Q = #amqqueue{durable = false}) -> ok = mnesia:write(rabbit_queue, Q, write), ok. 
-start_queue_process(Q) -> - {ok, Pid} = rabbit_amqqueue_sup:start_child([Q]), +determine_queue_nodes(Args) -> + Policy = rabbit_misc:table_lookup(Args, <<"x-ha-policy">>), + PolicyParams = rabbit_misc:table_lookup(Args, <<"x-ha-policy-params">>), + case {Policy, PolicyParams} of + {{_Type, <<"nodes">>}, {array, Nodes}} -> + case [list_to_atom(binary_to_list(Node)) || + {longstr, Node} <- Nodes] of + [] -> {node(), undefined}; + [Node] -> {Node, undefined}; + [First | Rest] -> {First, Rest} + end; + {{_Type, <<"all">>}, _} -> + {node(), all}; + _ -> + {node(), undefined} + end. + +start_queue_process(Node, Q) -> + {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]), Q#amqqueue{pid = Pid}. add_default_binding(#amqqueue{name = QueueName}) -> @@ -260,7 +279,7 @@ lookup(Name) -> with(Name, F, E) -> case lookup(Name) of - {ok, Q = #amqqueue{mirror_pids = []}} -> + {ok, Q = #amqqueue{slave_pids = []}} -> rabbit_misc:with_exit_handler(E, fun () -> F(Q) end); {ok, Q} -> E1 = fun () -> timer:sleep(25), with(Name, F, E) end, @@ -305,7 +324,7 @@ assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, RequiredArgs) -> rabbit_misc:assert_args_equivalence( Args, RequiredArgs, QueueName, - [<<"x-expires">>, <<"x-message-ttl">>, <<"x-mirror">>]). + [<<"x-expires">>, <<"x-message-ttl">>, <<"x-ha-policy">>]). check_declare_arguments(QueueName, Args) -> [case Fun(rabbit_misc:table_lookup(Args, Key)) of @@ -317,7 +336,7 @@ check_declare_arguments(QueueName, Args) -> end || {Key, Fun} <- [{<<"x-expires">>, fun check_integer_argument/1}, {<<"x-message-ttl">>, fun check_integer_argument/1}, - {<<"x-mirror">>, fun check_array_of_longstr_argument/1}]], + {<<"x-ha-policy">>, fun check_ha_policy_argument/1}]], ok. check_integer_argument(undefined) -> @@ -330,16 +349,14 @@ check_integer_argument({Type, Val}) when Val > 0 -> check_integer_argument({_Type, Val}) -> {error, {value_zero_or_less, Val}}. -check_array_of_longstr_argument(undefined) -> +check_ha_policy_argument(undefined) -> ok; -check_array_of_longstr_argument({array, Array}) -> - case lists:all(fun ({longstr, _NodeName}) -> true; - (_) -> false - end, Array) of - true -> ok; - false -> {error, {array_contains_non_longstrs, Array}} - end; -check_array_of_longstr_argument({Type, _Val}) -> +check_ha_policy_argument({longstr, Policy}) + when Policy =:= <<"nodes">> orelse Policy =:= <<"all">> -> + ok; +check_ha_policy_argument({longstr, Policy}) -> + {error, {invalid_ha_policy, Policy}}; +check_ha_policy_argument({Type, _}) -> {error, {unacceptable_type, Type}}. list(VHostPath) -> @@ -497,7 +514,7 @@ on_node_down(Node) -> rabbit_misc:execute_mnesia_tx_with_tail( fun () -> Dels = qlc:e(qlc:q([delete_queue(QueueName) || #amqqueue{name = QueueName, pid = Pid, - mirror_pids = []} + slave_pids = []} <- mnesia:table(rabbit_queue), node(Pid) == Node])), rabbit_binding:process_deletions( @@ -510,12 +527,13 @@ delete_queue(QueueName) -> rabbit_binding:remove_transient_for_destination(QueueName). pseudo_queue(QueueName, Pid) -> - #amqqueue{name = QueueName, - durable = false, - auto_delete = false, - arguments = [], - pid = Pid, - mirror_pids = []}. + #amqqueue{name = QueueName, + durable = false, + auto_delete = false, + arguments = [], + pid = Pid, + slave_pids = [], + mirror_nodes = undefined}. 
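%% (Illustrative aside, not part of this patch.) The x-ha-policy handling
%% added above expects clients to pass queue.declare arguments shaped like
%% one of the following AMQP tables -- the node names are invented for
%% illustration:
%%
%%   [{<<"x-ha-policy">>, longstr, <<"all">>}]
%%
%% or, to mirror onto specific nodes:
%%
%%   [{<<"x-ha-policy">>,        longstr, <<"nodes">>},
%%    {<<"x-ha-policy-params">>, array,   [{longstr, <<"rabbit@hare">>},
%%                                         {longstr, <<"rabbit@flopsy">>}]}]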
safe_delegate_call_ok(F, Pids) -> case delegate:invoke(Pids, fun (Pid) -> diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index e7cb67a2..6d0f7f25 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -75,7 +75,7 @@ consumers, memory, backing_queue_status, - mirror_pids + slave_pids ]). -define(CREATION_EVENT_KEYS, @@ -84,7 +84,8 @@ durable, auto_delete, arguments, - owner_pid + owner_pid, + mirror_nodes ]). -define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]). @@ -101,7 +102,8 @@ init(Q) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), - {ok, #q{q = Q#amqqueue{pid = self()}, + {ok, #q{q = Q#amqqueue{pid = self(), + mirror_nodes = MirrorNodes}, exclusive_consumer = none, has_had_consumers = false, backing_queue = backing_queue_module(Q), @@ -257,10 +259,10 @@ next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> end. backing_queue_module(#amqqueue{arguments = Args}) -> - case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of + case rabbit_misc:table_lookup(Args, <<"x-ha-policy">>) of undefined -> {ok, BQM} = application:get_env(backing_queue_module), BQM; - _Nodes -> rabbit_mirror_queue_master + _Policy -> rabbit_mirror_queue_master end. ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> @@ -803,9 +805,12 @@ i(memory, _) -> M; i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) -> BQ:status(BQS); -i(mirror_pids, #q{q = #amqqueue{name = Name}}) -> - {ok, #amqqueue{mirror_pids = MPids}} = rabbit_amqqueue:lookup(Name), - MPids; +i(slave_pids, #q{q = #amqqueue{name = Name}}) -> + {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(Name), + SPids; +i(mirror_nodes, #q{q = #amqqueue{name = Name}}) -> + {ok, #amqqueue{mirror_nodes = MNodes}} = rabbit_amqqueue:lookup(Name), + MNodes; i(Item, _) -> throw({bad_argument, Item}). diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl index 1344956e..2c28adce 100644 --- a/src/rabbit_amqqueue_sup.erl +++ b/src/rabbit_amqqueue_sup.erl @@ -18,7 +18,7 @@ -behaviour(supervisor2). --export([start_link/0, start_child/1]). +-export([start_link/0, start_child/2]). -export([init/1]). @@ -29,8 +29,8 @@ start_link() -> supervisor2:start_link({local, ?SERVER}, ?MODULE, []). -start_child(Args) -> - supervisor2:start_child(?SERVER, Args). +start_child(Node, Args) -> + supervisor2:start_child({?SERVER, Node}, Args). init([]) -> {ok, {{simple_one_for_one_terminate, 10, 10}, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 046d3380..94402d28 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -34,20 +34,20 @@ remove_from_queue(QueueName, DeadPids) -> %% get here. case mnesia:read({rabbit_queue, QueueName}) of [] -> {error, not_found}; - [Q = #amqqueue { pid = QPid, - mirror_pids = MPids }] -> - [QPid1 | MPids1] = - [Pid || Pid <- [QPid | MPids], + [Q = #amqqueue { pid = QPid, + slave_pids = SPids }] -> + [QPid1 | SPids1] = + [Pid || Pid <- [QPid | SPids], not lists:member(node(Pid), DeadNodes)], - case {{QPid, MPids}, {QPid1, MPids1}} of + case {{QPid, SPids}, {QPid1, SPids1}} of {Same, Same} -> ok; _ when QPid =:= QPid1 orelse node(QPid1) =:= node() -> %% Either master hasn't changed, so %% we're ok to update mnesia; or master %% has changed to become us! 
- Q1 = Q #amqqueue { pid = QPid1, - mirror_pids = MPids1 }, + Q1 = Q #amqqueue { pid = QPid1, + slave_pids = SPids1 }, ok = rabbit_amqqueue:store_queue(Q1); _ -> %% Master has changed, and we're not it, @@ -91,11 +91,11 @@ drop_slave(VHostPath, QueueName, MirrorNode) -> drop_slave(Queue, MirrorNode) -> if_mirrored_queue( Queue, - fun (#amqqueue { name = Name, pid = QPid, mirror_pids = MPids }) -> - case [Pid || Pid <- [QPid | MPids], node(Pid) =:= MirrorNode] of + fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids }) -> + case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of [] -> {error, {queue_not_mirrored_on_node, MirrorNode}}; - [QPid | MPids] -> + [QPid | SPids] -> {error, cannot_drop_only_mirror}; [Pid] -> rabbit_log:info("Dropping slave node on ~p for ~s~n", @@ -111,8 +111,8 @@ add_slave(VHostPath, QueueName, MirrorNode) -> add_slave(Queue, MirrorNode) -> if_mirrored_queue( Queue, - fun (#amqqueue { name = Name, pid = QPid, mirror_pids = MPids } = Q) -> - case [Pid || Pid <- [QPid | MPids], node(Pid) =:= MirrorNode] of + fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids } = Q) -> + case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of [] -> Result = rabbit_mirror_queue_slave_sup:start_child( MirrorNode, [Q]), rabbit_log:info( diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index b1d940d2..26780676 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -110,8 +110,8 @@ check_delivery(_ , _ , {_ , Qs}) -> {routed, Qs}. lookup_qpids(QNames) -> lists:foldl(fun (QName, QPids) -> case mnesia:dirty_read({rabbit_queue, QName}) of - [#amqqueue{pid = QPid, mirror_pids = MPids}] -> - MPids ++ [QPid | QPids]; + [#amqqueue{pid = QPid, slave_pids = SPids}] -> + SPids ++ [QPid | QPids]; [] -> QPids end diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 22204100..03b2c9e8 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -125,7 +125,8 @@ exclusive_owner :: rabbit_types:maybe(pid()), arguments :: rabbit_framing:amqp_table(), pid :: rabbit_types:maybe(pid()), - mirror_pids :: [pid()]}). + slave_pids :: [pid()], + mirror_nodes :: [node()] | 'undefined' | 'all'}). -type(exchange() :: #exchange{name :: rabbit_exchange:name(), diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index b4ac3328..ac2c378c 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -30,7 +30,7 @@ -rabbit_upgrade({exchange_event_serial, mnesia, []}). -rabbit_upgrade({trace_exchanges, mnesia, []}). -rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}). --rabbit_upgrade({mirror_pids, mnesia, []}). +-rabbit_upgrade({ha_mirrors, mnesia, []}). -rabbit_upgrade({gm, mnesia, []}). %% ------------------------------------------------------------------- @@ -47,7 +47,7 @@ -spec(exchange_event_serial/0 :: () -> 'ok'). -spec(trace_exchanges/0 :: () -> 'ok'). -spec(user_admin_to_tags/0 :: () -> 'ok'). --spec(mirror_pids/0 :: () -> 'ok'). +-spec(ha_mirrors/0 :: () -> 'ok'). -spec(gm/0 :: () -> 'ok'). -endif. @@ -137,16 +137,17 @@ user_admin_to_tags() -> end, [username, password_hash, tags], internal_user). 
-mirror_pids() -> +ha_mirrors() -> Tables = [rabbit_queue, rabbit_durable_queue], AddMirrorPidsFun = fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) -> - {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid, []} + {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid, + [], undefined} end, [ ok = transform(T, AddMirrorPidsFun, [name, durable, auto_delete, exclusive_owner, arguments, - pid, mirror_pids]) + pid, slave_pids, mirror_nodes]) || T <- Tables ], ok. -- cgit v1.2.1 From a210d0695e455ad87ae28efcfd66a900123031f2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 16:23:15 +0100 Subject: forgot to remove. (dnc) --- src/rabbit_amqqueue_process.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 6d0f7f25..c1fa048d 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -102,8 +102,7 @@ init(Q) -> ?LOGDEBUG("Queue starting - ~p~n", [Q]), process_flag(trap_exit, true), - {ok, #q{q = Q#amqqueue{pid = self(), - mirror_nodes = MirrorNodes}, + {ok, #q{q = Q#amqqueue{pid = self()}, exclusive_consumer = none, has_had_consumers = false, backing_queue = backing_queue_module(Q), -- cgit v1.2.1 From 6db76bf64b8a941f5ac7bb76f042e38088d7404a Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 16:43:25 +0100 Subject: Sort out misc. (dnc) --- src/rabbit_amqqueue.erl | 4 +-- src/rabbit_mirror_queue_master.erl | 2 +- src/rabbit_mirror_queue_misc.erl | 60 ++++++++++++++++++-------------------- 3 files changed, 31 insertions(+), 35 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 0b25a4e0..e279b055 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -253,8 +253,8 @@ determine_queue_nodes(Args) -> case [list_to_atom(binary_to_list(Node)) || {longstr, Node} <- Nodes] of [] -> {node(), undefined}; - [Node] -> {Node, undefined}; - [First | Rest] -> {First, Rest} + [Node] -> {Node, undefined}; + [First | Rest] -> {First, Rest} end; {{_Type, <<"all">>}, _} -> {node(), all}; diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index da12ea82..dd2e76a1 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -82,7 +82,7 @@ init(#amqqueue { arguments = Args, name = QName } = Q, Recover, _ -> [list_to_atom(binary_to_list(Node)) || {longstr, Node} <- Nodes] end) -- [node()], - [rabbit_mirror_queue_misc:add_slave(QName, Node) || Node <- Nodes1], + [rabbit_mirror_queue_misc:add_mirror(QName, Node) || Node <- Nodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover, AsyncCallback, SyncCallback), #state { gm = GM, diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 94402d28..633af6cb 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -17,7 +17,7 @@ -module(rabbit_mirror_queue_misc). -export([remove_from_queue/2, on_node_up/0, - drop_slave/2, drop_slave/3, add_slave/2, add_slave/3]). + drop_mirror/2, drop_mirror/3, add_mirror/2, add_mirror/3]). -include("rabbit.hrl"). @@ -25,7 +25,9 @@ %% then only remove that if we are about to be promoted. Otherwise we %% can have the situation where a slave updates the mnesia record for %% a queue, promoting another slave before that slave realises it has -%% become the new master. 
+%% become the new master, which is bad because it could then mean the +%% slave (now master) receives messages it's not ready for (for +%% example, new consumers). remove_from_queue(QueueName, DeadPids) -> DeadNodes = [node(DeadPid) || DeadPid <- DeadPids], rabbit_misc:execute_mnesia_transaction( @@ -34,8 +36,8 @@ remove_from_queue(QueueName, DeadPids) -> %% get here. case mnesia:read({rabbit_queue, QueueName}) of [] -> {error, not_found}; - [Q = #amqqueue { pid = QPid, - slave_pids = SPids }] -> + [Q = #amqqueue { pid = QPid, + slave_pids = SPids }] -> [QPid1 | SPids1] = [Pid || Pid <- [QPid | SPids], not lists:member(node(Pid), DeadNodes)], @@ -44,8 +46,8 @@ remove_from_queue(QueueName, DeadPids) -> ok; _ when QPid =:= QPid1 orelse node(QPid1) =:= node() -> %% Either master hasn't changed, so - %% we're ok to update mnesia; or master - %% has changed to become us! + %% we're ok to update mnesia; or we have + %% become the master. Q1 = Q #amqqueue { pid = QPid1, slave_pids = SPids1 }, ok = rabbit_amqqueue:store_queue(Q1); @@ -65,50 +67,44 @@ on_node_up() -> rabbit_misc:execute_mnesia_transaction( fun () -> mnesia:foldl( - fun (#amqqueue{ arguments = Args, name = QName }, QsN) -> - case rabbit_misc:table_lookup( - Args, <<"x-mirror">>) of - {_Type, []} -> - [QName | QsN]; - {_Type, Nodes} -> - Nodes1 = [list_to_atom(binary_to_list(Node)) - || {longstr, Node} <- Nodes], - case lists:member(node(), Nodes1) of - true -> [QName | QsN]; - false -> QsN - end; - _ -> - QsN + fun (#amqqueue { mirror_nodes = [] }, QsN) -> + QsN; + (#amqqueue { name = QName, + mirror_nodes = MNodes }, QsN) -> + case lists:member(node(), MNodes) of + true -> [QName | QsN]; + false -> QsN end end, [], rabbit_queue) end), - [add_slave(Q, node()) || Q <- Qs], + [add_mirror(Q, node()) || Q <- Qs], ok. -drop_slave(VHostPath, QueueName, MirrorNode) -> - drop_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). +drop_mirror(VHostPath, QueueName, MirrorNode) -> + drop_mirror(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). -drop_slave(Queue, MirrorNode) -> +drop_mirror(Queue, MirrorNode) -> if_mirrored_queue( Queue, fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids }) -> case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of [] -> {error, {queue_not_mirrored_on_node, MirrorNode}}; - [QPid | SPids] -> + [QPid] when SPids =:= [] -> {error, cannot_drop_only_mirror}; [Pid] -> - rabbit_log:info("Dropping slave node on ~p for ~s~n", - [MirrorNode, rabbit_misc:rs(Name)]), + rabbit_log:info( + "Dropping queue mirror on node ~p for ~s~n", + [MirrorNode, rabbit_misc:rs(Name)]), exit(Pid, {shutdown, dropped}), ok end end). -add_slave(VHostPath, QueueName, MirrorNode) -> - add_slave(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). +add_mirror(VHostPath, QueueName, MirrorNode) -> + add_mirror(rabbit_misc:r(VHostPath, queue, QueueName), MirrorNode). 
-add_slave(Queue, MirrorNode) -> +add_mirror(Queue, MirrorNode) -> if_mirrored_queue( Queue, fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids } = Q) -> @@ -116,7 +112,7 @@ add_slave(Queue, MirrorNode) -> [] -> Result = rabbit_mirror_queue_slave_sup:start_child( MirrorNode, [Q]), rabbit_log:info( - "Adding slave node for ~s on node ~p: ~p~n", + "Adding mirror of queue ~s on node ~p: ~p~n", [rabbit_misc:rs(Name), MirrorNode, Result]), case Result of {ok, _Pid} -> ok; @@ -129,7 +125,7 @@ add_slave(Queue, MirrorNode) -> if_mirrored_queue(Queue, Fun) -> rabbit_amqqueue:with( Queue, fun (#amqqueue { arguments = Args } = Q) -> - case rabbit_misc:table_lookup(Args, <<"x-mirror">>) of + case rabbit_misc:table_lookup(Args, <<"x-ha-policy">>) of undefined -> ok; _ -> Fun(Q) end -- cgit v1.2.1 From a8117713d9b4333c994016d04efe4ce6ef020643 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 16:51:22 +0100 Subject: All done. --- src/rabbit_mirror_queue_misc.erl | 5 +++++ src/rabbit_mirror_queue_slave.erl | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 633af6cb..7175d059 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -69,6 +69,11 @@ on_node_up() -> mnesia:foldl( fun (#amqqueue { mirror_nodes = [] }, QsN) -> QsN; + (#amqqueue { mirror_nodes = undefined }, QsN) -> + QsN; + (#amqqueue { name = QName, + mirror_nodes = all }, QsN) -> + [QName | QsN]; (#amqqueue { name = QName, mirror_nodes = MNodes }, QsN) -> case lists:member(node(), MNodes) of diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index c5f83c24..55d61d41 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -84,13 +84,13 @@ init([#amqqueue { name = QueueName } = Q]) -> {ok, MPid} = rabbit_misc:execute_mnesia_transaction( fun () -> - [Q1 = #amqqueue { pid = QPid, mirror_pids = MPids }] = + [Q1 = #amqqueue { pid = QPid, slave_pids = MPids }] = mnesia:read({rabbit_queue, QueueName}), %% ASSERTION [] = [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node], MPids1 = MPids ++ [Self], mnesia:write(rabbit_queue, - Q1 #amqqueue { mirror_pids = MPids1 }, + Q1 #amqqueue { slave_pids = MPids1 }, write), {ok, QPid} end), @@ -714,7 +714,7 @@ process_instruction( end; {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ2} -> %% The instruction was sent to us before we were - %% within the mirror_pids within the #amqqueue{} + %% within the slave_pids within the #amqqueue{} %% record. We'll never receive the message directly %% from the channel. And the channel will not be %% expecting any confirms from us. @@ -756,7 +756,7 @@ process_instruction({discard, ChPid, Msg = #basic_message { id = MsgId }}, {MQ2, PendingCh, MS}; {{value, {#delivery {}, _EnqueueOnPromotion}}, _MQ2} -> %% The instruction was sent to us before we were - %% within the mirror_pids within the #amqqueue{} + %% within the slave_pids within the #amqqueue{} %% record. We'll never receive the message directly %% from the channel. 
{MQ, PendingCh, MS} -- cgit v1.2.1 From d0a265798087bd79c4572f042c5730c7d27c3699 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 21 Jun 2011 16:52:08 +0100 Subject: Remove rubbish --- src/rabbit_mirror_queue_misc.erl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 7175d059..4761f79e 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -67,9 +67,7 @@ on_node_up() -> rabbit_misc:execute_mnesia_transaction( fun () -> mnesia:foldl( - fun (#amqqueue { mirror_nodes = [] }, QsN) -> - QsN; - (#amqqueue { mirror_nodes = undefined }, QsN) -> + fun (#amqqueue { mirror_nodes = undefined }, QsN) -> QsN; (#amqqueue { name = QName, mirror_nodes = all }, QsN) -> -- cgit v1.2.1 From ff249bde2400561dd61e0ecdddba107dabc6e620 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 21 Jun 2011 18:30:07 +0100 Subject: More realistic tests --- src/rabbit_backing_queue_qc.erl | 144 +++++++++++++++++++++++----------------- 1 file changed, 83 insertions(+), 61 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index 95832842..f5cd7f95 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -15,9 +15,14 @@ %% -module(rabbit_backing_queue_qc). +-include("rabbit.hrl"). +-include("rabbit_framing.hrl"). +-include_lib("proper/include/proper.hrl"). -behaviour(proper_statem). +-define(BQMOD, rabbit_variable_queue). + -export([initial_state/0, command/1, precondition/2, postcondition/3, next_state/3]). @@ -28,109 +33,125 @@ messages, acks}). --include("rabbit.hrl"). --include("rabbit_framing.hrl"). --include_lib("proper/include/proper.hrl"). initial_state() -> VQ = qc_variable_queue_init(qc_test_queue()), - #state{bqstate=VQ, messages = [], acks = []}. + #state{bqstate=VQ, messages = queue:new(), acks = []}. prop_backing_queue_test() -> ?FORALL(Cmds, commands(?MODULE, initial_state()), begin - {_H, _S, Res} = run_commands(?MODULE, Cmds), + {_H, #state{bqstate = VQ}, Res} = run_commands(?MODULE, Cmds), + rabbit_variable_queue:delete_and_terminate(shutdown, VQ), ?WHENFAIL( - io:format("Result: ~p~n", [Res]), - aggregate(command_names(Cmds), Res =:= ok)) + io:format("Result: ~p~n", [Res]), + aggregate(command_names(Cmds), Res =:= ok)) end). %% Commands -command(S) -> - frequency([{5, qc_publish(S)}, - {3, qc_fetch(S)}, - {2, qc_ack(S)}, - {2, qc_requeue(S)}, - {1, qc_ram(S)}]). +command(#state{bqstate = VQ} = S) -> + ?SIZED(Size, + frequency([{Size, qc_publish(S)}, + {Size, qc_fetch(S)}, + {Size, qc_ack(S)}, + {Size, qc_requeue(S)}, + {Size, qc_ram(S)}, + {1, {call, ?BQMOD, purge, [VQ]}}])). qc_publish(#state{bqstate = VQ}) -> - {call, rabbit_variable_queue, publish, - [qc_message(), #message_properties{}, self(), VQ]}. + {call, ?BQMOD, publish, + [qc_message(), #message_properties{}, self(), VQ]}. qc_fetch(#state{bqstate = VQ}) -> - {call, rabbit_variable_queue, fetch, [true, VQ]}. + {call, ?BQMOD, fetch, [boolean(), VQ]}. qc_ack(#state{bqstate = VQ, acks = Acks}) -> - {call, rabbit_variable_queue, ack, [sublist(Acks), VQ]}. + {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), VQ]}. qc_requeue(#state{bqstate = VQ, acks = Acks}) -> - {call, rabbit_variable_queue, requeue, - [sublist(Acks), fun(MsgOpts) -> MsgOpts end, VQ]}. + {call, ?BQMOD, requeue, + [rand_choice(proplists:get_keys(Acks)), fun(MsgOpts) -> MsgOpts end, VQ]}. 
qc_ram(#state{bqstate = VQ}) -> - {call, rabbit_variable_queue, set_ram_duration_target, - [oneof([0, infinity]), VQ]}. + {call, ?BQMOD, set_ram_duration_target, + [oneof([0, infinity]), VQ]}. %% Preconditions -precondition(_S, {call, rabbit_variable_queue, Fun, _Arg}) - when Fun =:= publish; Fun =:= fetch; Fun =:= set_ram_duration_target -> - true; -precondition(#state{acks = Acks}, {call, rabbit_variable_queue, Fun, _Arg}) +precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg}) when Fun =:= ack; Fun =:= requeue -> - length(Acks) > 0. + length(Acks) > 0; +precondition(_S, {call, ?BQMOD, _Fun, _Arg}) -> + true. %% Next state -next_state(S, VQ, {call, rabbit_variable_queue, publish, - [Msg, _MsgProps, _Pid, _VQ]}) -> +next_state(S, VQ, {call, ?BQMOD, publish, [Msg, _MsgProps, _Pid, _VQ]}) -> #state{messages = Messages} = S, - S#state{bqstate=VQ, messages= [Msg | Messages]}; - -next_state(S, Res, {call, rabbit_variable_queue, fetch, [AckReq, _VQ]}) -> - #state{messages = M, acks = Acks} = S, - ResultDetails = {call, erlang, element, [1, Res]}, - AckTag = {call, erlang, element, [3, ResultDetails]}, - VQ1 = {call, erlang, element, [2, Res]}, - S1 = S#state{bqstate = VQ1}, - case M of - [] -> S1; - [_|_] -> Msg = lists:last(M), - case AckReq of - true -> S1#state{messages = M -- [Msg], - acks = Acks ++ [AckTag]}; - false -> throw(non_ack_not_supported) - end + S#state{bqstate = VQ, messages = queue:in(Msg, Messages)}; + +next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _VQ]}) -> + #state{messages = Messages, acks = Acks} = S, + ResultInfo = {call, erlang, element, [1, Res]}, + VQ1 = {call, erlang, element, [2, Res]}, + AckTag = {call, erlang, element, [3, ResultInfo]}, + S1 = S#state{bqstate = VQ1}, + case queue:out(Messages) of + {empty, _M2} -> + S1; + {{value, Msg}, M2} -> + S2 = S1#state{messages = M2}, + case AckReq of + true -> S2#state{acks = Acks ++ [{AckTag, Msg}]}; + false -> S2 + end end; -next_state(S, Res, {call, rabbit_variable_queue, ack, [AcksArg, _VQ]}) -> +next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _VQ]}) -> #state{acks = AcksState} = S, VQ1 = {call, erlang, element, [2, Res]}, S#state{bqstate = VQ1, - acks = AcksState -- AcksArg}; + acks = propvals_by_keys(AcksState, AcksArg)}; -next_state(S, Res, {call, rabbit_variable_queue, requeue, [AcksArg, _F, _V]}) -> +next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> #state{messages = Messages, acks = AcksState} = S, VQ1 = {call, erlang, element, [2, Res]}, + RequeueMsgs = [proplists:get_value(Key, AcksState) || Key <- AcksArg ], S#state{bqstate = VQ1, - messages = AcksArg ++ Messages, - acks = AcksState -- AcksArg}; + messages = queue:join(Messages, queue:from_list(RequeueMsgs)), + acks = propvals_by_keys(AcksState, AcksArg)}; + +next_state(S, VQ, {call, ?BQMOD, set_ram_duration_target, _A}) -> + S#state{bqstate = VQ}; -next_state(S, VQ, {call, rabbit_variable_queue, set_ram_duration_target, _A}) -> - S#state{bqstate = VQ}. +next_state(S, Res, {call, ?BQMOD, purge, _A}) -> + VQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = VQ1, messages = queue:new()}. 
%% Postconditions +postcondition(#state{messages = Messages}, {call, ?BQMOD, fetch, _Args}, Res) -> + case Res of + {{MsgFetched, _IsDelivered, _AckTag, _Remaining_Len}, _VQ} -> + MsgFetched =:= queue:head(Messages); + {empty, _VQ} -> + queue:len(Messages) =:= 0 + end; + +postcondition(#state{messages = Messages}, {call, ?BQMOD, purge, _Args}, Res) -> + {PurgeCount, _VQ} = Res, + queue:len(Messages) =:= PurgeCount; + postcondition(#state{bqstate = VQ, messages = Messages}, - {call, _Mod, _Fun, _Args}, _Res) -> - rabbit_variable_queue:len(VQ) =:= length(Messages). + {call, ?BQMOD, _Fun, _Args}, _Res) -> + ?BQMOD:len(VQ) =:= queue:len(Messages). %% Helpers qc_message_payload() -> - binary(). + ?SIZED(Size, resize(Size * Size, binary())). qc_routing_key() -> noshrink(binary(10)). @@ -152,7 +173,7 @@ qc_default_exchange() -> {call, rabbit_misc, r, [<<>>, exchange, <<>>]}. qc_variable_queue_init(Q) -> - {call, rabbit_variable_queue, init, + {call, ?BQMOD, init, [Q, false, nop(2), nop(2), nop(2), nop(1)]}. qc_test_q() -> @@ -170,12 +191,13 @@ qc_test_queue(Durable) -> nop(N) -> function(N, ok). -sublist(List) -> +propvals_by_keys(Props, Keys) -> + lists:filter(fun ({Key, _Msg}) -> + not lists:member(Key, Keys) + end, Props). + +rand_choice(List) -> case List of [] -> []; - _ -> Item = lists:nth(random:uniform(length(List)), List), - case random:uniform(3) of - 1 -> [Item]; - _ -> [Item | sublist(List -- [Item])] - end + _ -> [lists:nth(random:uniform(length(List)), List)] end. -- cgit v1.2.1 From 079ccc099706ea5d733548df1dfe279ee43a3802 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 22 Jun 2011 12:26:59 +0100 Subject: Be more helpful --- src/rabbit_amqqueue.erl | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e279b055..f2a00ebd 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -327,36 +327,49 @@ assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args}, [<<"x-expires">>, <<"x-message-ttl">>, <<"x-ha-policy">>]). check_declare_arguments(QueueName, Args) -> - [case Fun(rabbit_misc:table_lookup(Args, Key)) of + [case Fun(rabbit_misc:table_lookup(Args, Key), Args) of ok -> ok; {error, Error} -> rabbit_misc:protocol_error( precondition_failed, "invalid arg '~s' for ~s: ~w", [Key, rabbit_misc:rs(QueueName), Error]) end || {Key, Fun} <- - [{<<"x-expires">>, fun check_integer_argument/1}, - {<<"x-message-ttl">>, fun check_integer_argument/1}, - {<<"x-ha-policy">>, fun check_ha_policy_argument/1}]], + [{<<"x-expires">>, fun check_integer_argument/2}, + {<<"x-message-ttl">>, fun check_integer_argument/2}, + {<<"x-ha-policy">>, fun check_ha_policy_argument/2}]], ok. -check_integer_argument(undefined) -> +check_integer_argument(undefined, _Args) -> ok; -check_integer_argument({Type, Val}) when Val > 0 -> +check_integer_argument({Type, Val}, _Args) when Val > 0 -> case lists:member(Type, ?INTEGER_ARG_TYPES) of true -> ok; false -> {error, {unacceptable_type, Type}} end; -check_integer_argument({_Type, Val}) -> +check_integer_argument({_Type, Val}, _Args) -> {error, {value_zero_or_less, Val}}. 
-check_ha_policy_argument(undefined) -> +check_ha_policy_argument(undefined, _Args) -> ok; -check_ha_policy_argument({longstr, Policy}) - when Policy =:= <<"nodes">> orelse Policy =:= <<"all">> -> +check_ha_policy_argument({longstr, <<"all">>}, _Args) -> ok; -check_ha_policy_argument({longstr, Policy}) -> +check_ha_policy_argument({longstr, <<"nodes">>}, _Args) -> + case rabbit_misc:table_lookup(Args, <<"x-ha-policy-params">>) of + undefined -> + {error, {require, <<"x-ha-policy-params">>}}; + {array, Ary} -> + case lists:all(fun ({longstr, _Node}) -> true; + _ -> false + end, Ary) of + true -> ok; + false -> {error, {require_list_of_nodes_as_longstrs, Ary}} + end; + {Type, _} -> + {error, {ha_nodes_policy_params_not_array_of_longstr, Type}} + end; +check_ha_policy_argument({longstr, Policy}, _Args) -> {error, {invalid_ha_policy, Policy}}; -check_ha_policy_argument({Type, _}) -> +check_ha_policy_argument({Type, _}, _Args) -> {error, {unacceptable_type, Type}}. list(VHostPath) -> -- cgit v1.2.1 From f32d634776ae501415860c1e6452a0c558d8c775 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 22 Jun 2011 12:28:01 +0100 Subject: Be more helpful...and make it compile --- src/rabbit_amqqueue.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index f2a00ebd..36701631 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -353,13 +353,13 @@ check_ha_policy_argument(undefined, _Args) -> ok; check_ha_policy_argument({longstr, <<"all">>}, _Args) -> ok; -check_ha_policy_argument({longstr, <<"nodes">>}, _Args) -> +check_ha_policy_argument({longstr, <<"nodes">>}, Args) -> case rabbit_misc:table_lookup(Args, <<"x-ha-policy-params">>) of undefined -> {error, {require, <<"x-ha-policy-params">>}}; {array, Ary} -> case lists:all(fun ({longstr, _Node}) -> true; - _ -> false + (_ ) -> false end, Ary) of true -> ok; false -> {error, {require_list_of_nodes_as_longstrs, Ary}} -- cgit v1.2.1 From 4122451797d4ab8be585e42037819308f09acbee Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 22 Jun 2011 12:33:06 +0100 Subject: Really really don't look at x-mirror anymore. sigh. --- src/rabbit_mirror_queue_master.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index dd2e76a1..463b8cfb 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -71,18 +71,18 @@ sender_death_fun() -> end) end. 
-init(#amqqueue { arguments = Args, name = QName } = Q, Recover, +init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover, AsyncCallback, SyncCallback) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link( Q, undefined, sender_death_fun()), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), - {_Type, Nodes} = rabbit_misc:table_lookup(Args, <<"x-mirror">>), - Nodes1 = (case Nodes of - [] -> rabbit_mnesia:all_clustered_nodes(); - _ -> [list_to_atom(binary_to_list(Node)) || - {longstr, Node} <- Nodes] - end) -- [node()], - [rabbit_mirror_queue_misc:add_mirror(QName, Node) || Node <- Nodes1], + MNodes1 = + (case MNodes of + all -> rabbit_mnesia:all_clustered_nodes(); + undefined -> []; + _ -> [list_to_atom(binary_to_list(Node)) || Node <- MNodes] + end) -- [node()], + [rabbit_mirror_queue_misc:add_mirror(QName, Node) || Node <- MNodes1], {ok, BQ} = application:get_env(backing_queue_module), BQS = BQ:init(Q, Recover, AsyncCallback, SyncCallback), #state { gm = GM, -- cgit v1.2.1 From 1c26a00f593606c5af6b09ff16f7e74c05393fdd Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 22 Jun 2011 13:09:39 +0100 Subject: Make an error more likely to be formatted sensibly --- src/rabbit_amqqueue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 36701631..21e3721e 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -356,7 +356,7 @@ check_ha_policy_argument({longstr, <<"all">>}, _Args) -> check_ha_policy_argument({longstr, <<"nodes">>}, Args) -> case rabbit_misc:table_lookup(Args, <<"x-ha-policy-params">>) of undefined -> - {error, {require, <<"x-ha-policy-params">>}}; + {error, {require, 'x-ha-policy-params'}}; {array, Ary} -> case lists:all(fun ({longstr, _Node}) -> true; (_ ) -> false -- cgit v1.2.1 From eb5db5fde98de1d996ac8fd09f8251c22aafd1d2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 22 Jun 2011 13:32:30 +0100 Subject: Explode if the nodes policy has an empty list as the policy params --- src/rabbit_amqqueue.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 21e3721e..bacb1d21 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -252,7 +252,6 @@ determine_queue_nodes(Args) -> {{_Type, <<"nodes">>}, {array, Nodes}} -> case [list_to_atom(binary_to_list(Node)) || {longstr, Node} <- Nodes] of - [] -> {node(), undefined}; [Node] -> {Node, undefined}; [First | Rest] -> {First, Rest} end; @@ -357,12 +356,14 @@ check_ha_policy_argument({longstr, <<"nodes">>}, Args) -> case rabbit_misc:table_lookup(Args, <<"x-ha-policy-params">>) of undefined -> {error, {require, 'x-ha-policy-params'}}; + {array, []} -> + {error, {require_non_empty_list_of_nodes_for_ha}}; {array, Ary} -> case lists:all(fun ({longstr, _Node}) -> true; (_ ) -> false end, Ary) of - true -> ok; - false -> {error, {require_list_of_nodes_as_longstrs, Ary}} + true -> ok; + false -> {error, {require_node_list_as_longstrs_for_ha, Ary}} end; {Type, _} -> {error, {ha_nodes_policy_params_not_array_of_longstr, Type}} -- cgit v1.2.1 From da0b7e1d3ec788182a93df14ebc6dbc509565a27 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 24 Jun 2011 12:20:03 +0100 Subject: Oops, we need the dependency. 
--- src/rabbit_upgrade_functions.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index ac2c378c..0f7a7810 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -28,7 +28,7 @@ -rabbit_upgrade({topic_trie, mnesia, []}). -rabbit_upgrade({semi_durable_route, mnesia, []}). -rabbit_upgrade({exchange_event_serial, mnesia, []}). --rabbit_upgrade({trace_exchanges, mnesia, []}). +-rabbit_upgrade({trace_exchanges, mnesia, [internal_exchanges]}). -rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}). -rabbit_upgrade({ha_mirrors, mnesia, []}). -rabbit_upgrade({gm, mnesia, []}). -- cgit v1.2.1 From 3c63aba85361beae3f5733574fd470222e4f3a8c Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 24 Jun 2011 21:02:57 +0100 Subject: remove transactions from backing queue ...and mostly from amqqueue_process --- include/rabbit_backing_queue_spec.hrl | 17 +-- src/rabbit_amqqueue_process.erl | 113 +++------------- src/rabbit_backing_queue.erl | 21 +-- src/rabbit_mirror_queue_master.erl | 32 +---- src/rabbit_tests.erl | 7 +- src/rabbit_variable_queue.erl | 236 ++++------------------------------ 6 files changed, 60 insertions(+), 366 deletions(-) diff --git a/include/rabbit_backing_queue_spec.hrl b/include/rabbit_backing_queue_spec.hrl index 295d9039..ee102f5e 100644 --- a/include/rabbit_backing_queue_spec.hrl +++ b/include/rabbit_backing_queue_spec.hrl @@ -26,12 +26,11 @@ fun ((rabbit_types:message_properties()) -> rabbit_types:message_properties())). -type(async_callback() :: fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')). --type(sync_callback() :: fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok' | 'error')). -spec(start/1 :: ([rabbit_amqqueue:name()]) -> 'ok'). -spec(stop/0 :: () -> 'ok'). --spec(init/4 :: (rabbit_types:amqqueue(), attempt_recovery(), - async_callback(), sync_callback()) -> state()). +-spec(init/3 :: (rabbit_types:amqqueue(), attempt_recovery(), + async_callback()) -> state()). -spec(terminate/2 :: (any(), state()) -> state()). -spec(delete_and_terminate/2 :: (any(), state()) -> state()). -spec(purge/1 :: (state()) -> {purged_msg_count(), state()}). @@ -51,14 +50,6 @@ -spec(fetch/2 :: (true, state()) -> {fetch_result(ack()), state()}; (false, state()) -> {fetch_result(undefined), state()}). -spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}). --spec(tx_publish/5 :: (rabbit_types:txn(), rabbit_types:basic_message(), - rabbit_types:message_properties(), pid(), state()) -> - state()). --spec(tx_ack/3 :: (rabbit_types:txn(), [ack()], state()) -> state()). --spec(tx_rollback/2 :: (rabbit_types:txn(), state()) -> {[ack()], state()}). --spec(tx_commit/4 :: - (rabbit_types:txn(), fun (() -> any()), - message_properties_transformer(), state()) -> {[ack()], state()}). -spec(requeue/3 :: ([ack()], message_properties_transformer(), state()) -> {[rabbit_guid:guid()], state()}). -spec(len/1 :: (state()) -> non_neg_integer()). @@ -71,7 +62,7 @@ -spec(handle_pre_hibernate/1 :: (state()) -> state()). -spec(status/1 :: (state()) -> [{atom(), any()}]). -spec(invoke/3 :: (atom(), fun ((atom(), A) -> A), state()) -> state()). --spec(is_duplicate/3 :: - (rabbit_types:txn(), rabbit_types:basic_message(), state()) -> +-spec(is_duplicate/2 :: + (rabbit_types:basic_message(), state()) -> {'false'|'published'|'discarded', state()}). -spec(discard/3 :: (rabbit_types:basic_message(), pid(), state()) -> state()). 
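%% [Editorial sketch -- not part of the patch.] With the tx_* callbacks
%% gone, a caller drives a backing-queue module through the trimmed
%% behaviour roughly as below. BQ, Q, Msg, MsgProps and ChPid are assumed
%% to be a callback module, an #amqqueue{} record, a #basic_message{}, a
%% #message_properties{} and a channel pid; the argument and result shapes
%% follow the specs above.
exercise_bq(BQ, Q, Msg, MsgProps, ChPid) ->
    BQS0 = BQ:init(Q, false, fun (_Mod, _Fun) -> ok end),
    BQS1 = BQ:publish(Msg, MsgProps, ChPid, BQS0),
    {{_FetchedMsg, _IsDelivered, AckTag, _Remaining}, BQS2} =
        BQ:fetch(true, BQS1),
    {_MsgIds, BQS3} = BQ:ack([AckTag], BQS2),
    BQ:terminate(shutdown, BQS3).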
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index c1fa048d..28fced98 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -62,7 +62,6 @@ monitor_ref, acktags, is_limit_active, - txn, unsent_message_count}). -define(STATISTICS_KEYS, @@ -194,13 +193,6 @@ bq_init(BQ, Q, Recover) -> BQ:init(Q, Recover, fun (Mod, Fun) -> rabbit_amqqueue:run_backing_queue_async(Self, Mod, Fun) - end, - fun (Mod, Fun) -> - rabbit_misc:with_exit_handler( - fun () -> error end, - fun () -> - rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) - end) end). process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> @@ -217,22 +209,14 @@ init_expires(Expires, State) -> ensure_expiry_timer(State#q{expires = Expires}). init_ttl(TTL, State) -> drop_expired_messages(State#q{ttl = TTL}). terminate_shutdown(Fun, State) -> - State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = + State1 = #q{backing_queue_state = BQS} = stop_sync_timer(stop_rate_timer(State)), case BQS of undefined -> State; _ -> ok = rabbit_memory_monitor:deregister(self()), - BQS1 = lists:foldl( - fun (#cr{txn = none}, BQSN) -> - BQSN; - (#cr{txn = Txn}, BQSN) -> - {_AckTags, BQSN1} = - BQ:tx_rollback(Txn, BQSN), - BQSN1 - end, BQS, all_ch_record()), [emit_consumer_deleted(Ch, CTag) || {Ch, CTag, _} <- consumers(State1)], - State1#q{backing_queue_state = Fun(BQS1)} + State1#q{backing_queue_state = Fun(BQS)} end. reply(Reply, NewState) -> @@ -343,7 +327,6 @@ ch_record(ChPid) -> monitor_ref = MonitorRef, acktags = sets:new(), is_limit_active = false, - txn = none, unsent_message_count = 0}, put(Key, C), C; @@ -355,13 +338,12 @@ store_ch_record(C = #cr{ch_pid = ChPid}) -> maybe_store_ch_record(C = #cr{consumer_count = ConsumerCount, acktags = ChAckTags, - txn = Txn, unsent_message_count = UnsentMessageCount}) -> - case {sets:size(ChAckTags), ConsumerCount, UnsentMessageCount, Txn} of - {0, 0, 0, none} -> ok = erase_ch_record(C), - false; - _ -> store_ch_record(C), - true + case {sets:size(ChAckTags), ConsumerCount, UnsentMessageCount} of + {0, 0, 0} -> ok = erase_ch_record(C), + false; + _ -> store_ch_record(C), + true end. erase_ch_record(#cr{ch_pid = ChPid, @@ -523,7 +505,7 @@ attempt_delivery(Delivery = #delivery{txn = none, immediately -> rabbit_channel:confirm(ChPid, [MsgSeqNo]); _ -> ok end, - case BQ:is_duplicate(none, Message, BQS) of + case BQ:is_duplicate(Message, BQS) of {false, BQS1} -> PredFun = fun (IsEmpty, _State) -> not IsEmpty end, DeliverFun = @@ -555,24 +537,6 @@ attempt_delivery(Delivery = #delivery{txn = none, discarded -> false end, {Delivered, Confirm, State#q{backing_queue_state = BQS1}} - end; -attempt_delivery(Delivery = #delivery{txn = Txn, - sender = ChPid, - message = Message}, - State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> - Confirm = should_confirm_message(Delivery, State), - case BQ:is_duplicate(Txn, Message, BQS) of - {false, BQS1} -> - store_ch_record((ch_record(ChPid))#cr{txn = Txn}), - BQS2 = BQ:tx_publish(Txn, Message, ?BASE_MESSAGE_PROPERTIES, ChPid, - BQS1), - {true, Confirm, State#q{backing_queue_state = BQS2}}; - {Duplicate, BQS1} -> - Delivered = case Duplicate of - published -> true; - discarded -> false - end, - {Delivered, Confirm, State#q{backing_queue_state = BQS1}} end. 
deliver_or_enqueue(Delivery = #delivery{message = Message, @@ -652,7 +616,7 @@ handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder}) -> case lookup_ch(DownPid) of not_found -> {ok, State}; - C = #cr{ch_pid = ChPid, txn = Txn, acktags = ChAckTags} -> + C = #cr{ch_pid = ChPid, acktags = ChAckTags} -> ok = erase_ch_record(C), State1 = State#q{ exclusive_consumer = case Holder of @@ -665,13 +629,8 @@ handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder}) -> ChPid, State#q.blocked_consumers)}, case should_auto_delete(State1) of true -> {stop, State1}; - false -> State2 = case Txn of - none -> State1; - _ -> rollback_transaction(Txn, C, - State1) - end, - {ok, requeue_and_run(sets:to_list(ChAckTags), - ensure_expiry_timer(State2))} + false -> {ok, requeue_and_run(sets:to_list(ChAckTags), + ensure_expiry_timer(State1))} end end. @@ -705,25 +664,6 @@ run_backing_queue(Mod, Fun, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> run_message_queue(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)}). -commit_transaction(Txn, From, C = #cr{acktags = ChAckTags}, - State = #q{backing_queue = BQ, - backing_queue_state = BQS, - ttl = TTL}) -> - {AckTags, BQS1} = BQ:tx_commit( - Txn, fun () -> gen_server2:reply(From, ok) end, - reset_msg_expiry_fun(TTL), BQS), - ChAckTags1 = subtract_acks(ChAckTags, AckTags), - maybe_store_ch_record(C#cr{acktags = ChAckTags1, txn = none}), - State#q{backing_queue_state = BQS1}. - -rollback_transaction(Txn, C, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - {_AckTags, BQS1} = BQ:tx_rollback(Txn, BQS), - %% Iff we removed acktags from the channel record on ack+txn then - %% we would add them back in here. - maybe_store_ch_record(C#cr{txn = none}), - State#q{backing_queue_state = BQS1}. - subtract_acks(A, B) when is_list(B) -> lists:foldl(fun sets:del_element/2, A, B). @@ -933,13 +873,6 @@ handle_call({deliver, Delivery}, From, State) -> gen_server2:reply(From, true), noreply(deliver_or_enqueue(Delivery, State)); -handle_call({commit, Txn, ChPid}, From, State) -> - case lookup_ch(ChPid) of - not_found -> reply(ok, State); - C -> noreply(run_message_queue( - commit_transaction(Txn, From, C, State))) - end; - handle_call({notify_down, ChPid}, _From, State) -> %% we want to do this synchronously, so that auto_deleted queues %% are no longer visible by the time we send a response to the @@ -1095,24 +1028,16 @@ handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. 
noreply(deliver_or_enqueue(Delivery, State)); -handle_cast({ack, Txn, AckTags, ChPid}, +handle_cast({ack, none, AckTags, ChPid}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> case lookup_ch(ChPid) of not_found -> noreply(State); C = #cr{acktags = ChAckTags} -> - {C1, State1} = - case Txn of - none -> ChAckTags1 = subtract_acks(ChAckTags, AckTags), - NewC = C#cr{acktags = ChAckTags1}, - {_Guids, BQS1} = BQ:ack(AckTags, BQS), - {NewC, State#q{backing_queue_state = BQS1}}; - _ -> BQS1 = BQ:tx_ack(Txn, AckTags, BQS), - {C#cr{txn = Txn}, - State#q{backing_queue_state = BQS1}} - end, - maybe_store_ch_record(C1), - noreply(State1) + maybe_store_ch_record(C#cr{acktags = subtract_acks( + ChAckTags, AckTags)}), + {_Guids, BQS1} = BQ:ack(AckTags, BQS), + noreply(State#q{backing_queue_state = BQS1}) end; handle_cast({reject, AckTags, Requeue, ChPid}, @@ -1131,12 +1056,6 @@ handle_cast({reject, AckTags, Requeue, ChPid}, end) end; -handle_cast({rollback, Txn, ChPid}, State) -> - noreply(case lookup_ch(ChPid) of - not_found -> State; - C -> rollback_transaction(Txn, C, State) - end); - handle_cast(delete_immediately, State) -> {stop, normal, State}; diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl index 217ad3eb..77278416 100644 --- a/src/rabbit_backing_queue.erl +++ b/src/rabbit_backing_queue.erl @@ -44,9 +44,7 @@ behaviour_info(callbacks) -> %% makes it useful for passing messages back into the backing %% queue, especially as the backing queue does not have %% control of its own mailbox. - %% 4. a synchronous callback. Same as the asynchronous callback - %% but waits for completion and returns 'error' on error. - {init, 4}, + {init, 3}, %% Called on queue shutdown when queue isn't being deleted. {terminate, 2}, @@ -107,21 +105,6 @@ behaviour_info(callbacks) -> %% about. Must return 1 msg_id per Ack, in the same order as Acks. {ack, 2}, - %% A publish, but in the context of a transaction. - {tx_publish, 5}, - - %% Acks, but in the context of a transaction. - {tx_ack, 3}, - - %% Undo anything which has been done in the context of the - %% specified transaction. - {tx_rollback, 2}, - - %% Commit a transaction. The Fun passed in must be called once - %% the messages have really been commited. This CPS permits the - %% possibility of commit coalescing. - {tx_commit, 4}, - %% Reinsert messages into the queue which have already been %% delivered and were pending acknowledgement. {requeue, 3}, @@ -175,7 +158,7 @@ behaviour_info(callbacks) -> %% the BQ to signal that it's already seen this message (and in %% what capacity - i.e. was it published previously or discarded %% previously) and thus the message should be dropped. - {is_duplicate, 3}, + {is_duplicate, 2}, %% Called to inform the BQ about messages which have reached the %% queue, but are not going to be further passed to BQ for some diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 463b8cfb..082730e0 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -16,13 +16,12 @@ -module(rabbit_mirror_queue_master). --export([init/4, terminate/2, delete_and_terminate/2, +-export([init/3, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, fetch/2, ack/2, - tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, requeue/3, len/1, is_empty/1, drain_confirmed/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/3, discard/3]). 
+ status/1, invoke/3, is_duplicate/2, discard/3]). -export([start/1, stop/0]). @@ -72,7 +71,7 @@ sender_death_fun() -> end. init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover, - AsyncCallback, SyncCallback) -> + AsyncCallback) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link( Q, undefined, sender_death_fun()), GM = rabbit_mirror_queue_coordinator:get_gm(CPid), @@ -84,7 +83,7 @@ init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover, end) -- [node()], [rabbit_mirror_queue_misc:add_mirror(QName, Node) || Node <- MNodes1], {ok, BQ} = application:get_env(backing_queue_module), - BQS = BQ:init(Q, Recover, AsyncCallback, SyncCallback), + BQS = BQ:init(Q, Recover, AsyncCallback), #state { gm = GM, coordinator = CPid, backing_queue = BQ, @@ -243,21 +242,6 @@ ack(AckTags, State = #state { gm = GM, {MsgIds, State #state { backing_queue_state = BQS1, ack_msg_id = AM1 }}. -tx_publish(_Txn, _Msg, _MsgProps, _ChPid, State) -> - %% We don't support txns in mirror queues - State. - -tx_ack(_Txn, _AckTags, State) -> - %% We don't support txns in mirror queues - State. - -tx_rollback(_Txn, State) -> - {[], State}. - -tx_commit(_Txn, PostCommitFun, _MsgPropsFun, State) -> - PostCommitFun(), %% Probably must run it to avoid deadlocks - {[], State}. - requeue(AckTags, MsgPropsFun, State = #state { gm = GM, backing_queue = BQ, backing_queue_state = BQS }) -> @@ -299,7 +283,7 @@ invoke(Mod, Fun, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }. -is_duplicate(none, Message = #basic_message { id = MsgId }, +is_duplicate(Message = #basic_message { id = MsgId }, State = #state { seen_status = SS, backing_queue = BQ, backing_queue_state = BQS, @@ -341,11 +325,7 @@ is_duplicate(none, Message = #basic_message { id = MsgId }, %% Don't erase from SS here because discard/2 is about to %% be called and we need to be able to detect this case {discarded, State} - end; -is_duplicate(_Txn, _Msg, State) -> - %% In a transaction. We don't support txns in mirror queues. But - %% it's probably not a duplicate... - {false, State}. + end. discard(Msg = #basic_message { id = MsgId }, ChPid, State = #state { gm = GM, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index f5492cdc..6e44c7a0 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1669,8 +1669,9 @@ test_backing_queue() -> passed = test_queue_index(), passed = test_queue_index_props(), passed = test_variable_queue(), - passed = test_variable_queue_delete_msg_store_files_callback(), - passed = test_queue_recover(), + %% FIXME: replace the use of tx in these with confirms + %% passed = test_variable_queue_delete_msg_store_files_callback(), + %% passed = test_queue_recover(), application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), passed; @@ -2084,7 +2085,7 @@ test_queue_index() -> variable_queue_init(Q, Recover) -> rabbit_variable_queue:init( - Q, Recover, fun nop/2, fun nop/2, fun nop/2, fun nop/1). + Q, Recover, fun nop/2, fun nop/2, fun nop/1). variable_queue_publish(IsPersistent, Count, VQ) -> lists:foldl( diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index a167cca0..630be00b 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -16,19 +16,18 @@ -module(rabbit_variable_queue). 
--export([init/4, terminate/2, delete_and_terminate/2, +-export([init/3, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, drain_confirmed/1, - fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, - requeue/3, len/1, is_empty/1, dropwhile/2, + fetch/2, ack/2, requeue/3, len/1, is_empty/1, dropwhile/2, set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, - status/1, invoke/3, is_duplicate/3, discard/3, + status/1, invoke/3, is_duplicate/2, discard/3, multiple_routing_keys/0]). -export([start/1, stop/0]). %% exported for testing only --export([start_msg_store/2, stop_msg_store/0, init/6]). +-export([start_msg_store/2, stop_msg_store/0, init/5]). %%---------------------------------------------------------------------------- %% Definitions: @@ -238,12 +237,10 @@ ram_ack_index, index_state, msg_store_clients, - on_sync, durable, transient_threshold, async_callback, - sync_callback, len, persistent_count, @@ -284,10 +281,6 @@ end_seq_id %% end_seq_id is exclusive }). --record(tx, { pending_messages, pending_acks }). - --record(sync, { acks_persistent, acks_all, pubs, funs }). - %% When we discover, on publish, that we should write some indices to %% disk for some betas, the IO_BATCH_SIZE sets the number of betas %% that we must be due to write indices for before we do any work at @@ -320,12 +313,6 @@ count :: non_neg_integer(), end_seq_id :: non_neg_integer() }). --type(sync() :: #sync { acks_persistent :: [[seq_id()]], - acks_all :: [[seq_id()]], - pubs :: [{message_properties_transformer(), - [rabbit_types:basic_message()]}], - funs :: [fun (() -> any())] }). - -type(state() :: #vqstate { q1 :: queue(), q2 :: bpqueue:bpqueue(), @@ -338,12 +325,10 @@ index_state :: any(), msg_store_clients :: 'undefined' | {{any(), binary()}, {any(), binary()}}, - on_sync :: sync(), durable :: boolean(), transient_threshold :: non_neg_integer(), async_callback :: async_callback(), - sync_callback :: sync_callback(), len :: non_neg_integer(), persistent_count :: non_neg_integer(), @@ -376,11 +361,6 @@ count = 0, end_seq_id = Z }). --define(BLANK_SYNC, #sync { acks_persistent = [], - acks_all = [], - pubs = [], - funs = [] }). - %%---------------------------------------------------------------------------- %% Public API %%---------------------------------------------------------------------------- @@ -409,17 +389,17 @@ stop_msg_store() -> ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE), ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE). -init(Queue, Recover, AsyncCallback, SyncCallback) -> - init(Queue, Recover, AsyncCallback, SyncCallback, +init(Queue, Recover, AsyncCallback) -> + init(Queue, Recover, AsyncCallback, fun (MsgIds, ActionTaken) -> msgs_written_to_disk(AsyncCallback, MsgIds, ActionTaken) end, fun (MsgIds) -> msg_indices_written_to_disk(AsyncCallback, MsgIds) end). 
init(#amqqueue { name = QueueName, durable = IsDurable }, false, - AsyncCallback, SyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> + AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun), - init(IsDurable, IndexState, 0, [], AsyncCallback, SyncCallback, + init(IsDurable, IndexState, 0, [], AsyncCallback, case IsDurable of true -> msg_store_client_init(?PERSISTENT_MSG_STORE, MsgOnDiskFun, AsyncCallback); @@ -428,7 +408,7 @@ init(#amqqueue { name = QueueName, durable = IsDurable }, false, msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback)); init(#amqqueue { name = QueueName, durable = true }, true, - AsyncCallback, SyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> + AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) -> Terms = rabbit_queue_index:shutdown_terms(QueueName), {PRef, TRef, Terms1} = case [persistent_ref, transient_ref] -- proplists:get_keys(Terms) of @@ -449,14 +429,14 @@ init(#amqqueue { name = QueueName, durable = true }, true, rabbit_msg_store:contains(MsgId, PersistentClient) end, MsgIdxOnDiskFun), - init(true, IndexState, DeltaCount, Terms1, AsyncCallback, SyncCallback, + init(true, IndexState, DeltaCount, Terms1, AsyncCallback, PersistentClient, TransientClient). terminate(_Reason, State) -> State1 = #vqstate { persistent_count = PCount, index_state = IndexState, msg_store_clients = {MSCStateP, MSCStateT} } = - remove_pending_ack(true, tx_commit_index(State)), + remove_pending_ack(true, State), PRef = case MSCStateP of undefined -> undefined; _ -> ok = rabbit_msg_store:client_terminate(MSCStateP), @@ -674,59 +654,6 @@ ack(AckTags, State) -> AckTags, State), {MsgIds, a(State1)}. -tx_publish(Txn, Msg = #basic_message { is_persistent = IsPersistent }, MsgProps, - _ChPid, State = #vqstate { durable = IsDurable, - msg_store_clients = MSCState }) -> - Tx = #tx { pending_messages = Pubs } = lookup_tx(Txn), - store_tx(Txn, Tx #tx { pending_messages = [{Msg, MsgProps} | Pubs] }), - case IsPersistent andalso IsDurable of - true -> MsgStatus = msg_status(true, undefined, Msg, MsgProps), - #msg_status { msg_on_disk = true } = - maybe_write_msg_to_disk(false, MsgStatus, MSCState); - false -> ok - end, - a(State). - -tx_ack(Txn, AckTags, State) -> - Tx = #tx { pending_acks = Acks } = lookup_tx(Txn), - store_tx(Txn, Tx #tx { pending_acks = [AckTags | Acks] }), - State. - -tx_rollback(Txn, State = #vqstate { durable = IsDurable, - msg_store_clients = MSCState }) -> - #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), - erase_tx(Txn), - ok = case IsDurable of - true -> msg_store_remove(MSCState, true, - persistent_msg_ids(Pubs)); - false -> ok - end, - {lists:append(AckTags), a(State)}. 
- -tx_commit(Txn, Fun, MsgPropsFun, - State = #vqstate { durable = IsDurable, - async_callback = AsyncCallback, - sync_callback = SyncCallback, - msg_store_clients = MSCState }) -> - #tx { pending_acks = AckTags, pending_messages = Pubs } = lookup_tx(Txn), - erase_tx(Txn), - AckTags1 = lists:append(AckTags), - PersistentMsgIds = persistent_msg_ids(Pubs), - HasPersistentPubs = PersistentMsgIds =/= [], - {AckTags1, - a(case IsDurable andalso HasPersistentPubs of - true -> MsgStoreCallback = - fun () -> msg_store_callback( - PersistentMsgIds, Pubs, AckTags1, Fun, - MsgPropsFun, AsyncCallback, SyncCallback) - end, - ok = msg_store_sync(MSCState, true, PersistentMsgIds, - fun () -> spawn(MsgStoreCallback) end), - State; - false -> tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags1, - Fun, MsgPropsFun, State) - end)}. - requeue(AckTags, MsgPropsFun, State) -> MsgPropsFun1 = fun (MsgProps) -> (MsgPropsFun(MsgProps)) #message_properties { @@ -832,23 +759,22 @@ ram_duration(State = #vqstate { ram_msg_count_prev = RamMsgCount, ram_ack_count_prev = RamAckCount }}. -needs_timeout(State = #vqstate { on_sync = OnSync }) -> - case {OnSync, needs_index_sync(State)} of - {?BLANK_SYNC, false} -> - case reduce_memory_use(fun (_Quota, State1) -> {0, State1} end, - fun (_Quota, State1) -> State1 end, - fun (State1) -> State1 end, - fun (_Quota, State1) -> {0, State1} end, - State) of - {true, _State} -> idle; - {false, _State} -> false - end; - _ -> - timed +needs_timeout(State) -> + case needs_index_sync(State) of + false -> case reduce_memory_use( + fun (_Quota, State1) -> {0, State1} end, + fun (_Quota, State1) -> State1 end, + fun (State1) -> State1 end, + fun (_Quota, State1) -> {0, State1} end, + State) of + {true, _State} -> idle; + {false, _State} -> false + end; + true -> timed end. timeout(State) -> - a(reduce_memory_use(confirm_commit_index(tx_commit_index(State)))). + a(reduce_memory_use(confirm_commit_index(State))). handle_pre_hibernate(State = #vqstate { index_state = IndexState }) -> State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }. @@ -858,7 +784,6 @@ status(#vqstate { len = Len, pending_ack = PA, ram_ack_index = RAI, - on_sync = #sync { funs = From }, target_ram_count = TargetRamCount, ram_msg_count = RamMsgCount, ram_index_count = RamIndexCount, @@ -875,7 +800,6 @@ status(#vqstate { {q4 , queue:len(Q4)}, {len , Len}, {pending_acks , dict:size(PA)}, - {outstanding_txns , length(From)}, {target_ram_count , TargetRamCount}, {ram_msg_count , RamMsgCount}, {ram_ack_count , gb_trees:size(RAI)}, @@ -887,10 +811,9 @@ status(#vqstate { {avg_ack_ingress_rate, AvgAckIngressRate}, {avg_ack_egress_rate , AvgAckEgressRate} ]. -invoke(?MODULE, Fun, State) -> - Fun(?MODULE, State). +invoke(?MODULE, Fun, State) -> Fun(?MODULE, State). -is_duplicate(_Txn, _Msg, State) -> {false, State}. +is_duplicate(_Msg, State) -> {false, State}. discard(_Msg, _ChPid, State) -> State. @@ -986,11 +909,6 @@ msg_store_remove(MSCState, IsPersistent, MsgIds) -> MSCState, IsPersistent, fun (MCSState1) -> rabbit_msg_store:remove(MsgIds, MCSState1) end). -msg_store_sync(MSCState, IsPersistent, MsgIds, Fun) -> - with_immutable_msg_store_state( - MSCState, IsPersistent, - fun (MSCState1) -> rabbit_msg_store:sync(MsgIds, Fun, MSCState1) end). - msg_store_close_fds(MSCState, IsPersistent) -> with_msg_store_state( MSCState, IsPersistent, @@ -1007,20 +925,6 @@ maybe_write_delivered(false, _SeqId, IndexState) -> maybe_write_delivered(true, SeqId, IndexState) -> rabbit_queue_index:deliver([SeqId], IndexState). 
-lookup_tx(Txn) -> case get({txn, Txn}) of - undefined -> #tx { pending_messages = [], - pending_acks = [] }; - V -> V - end. - -store_tx(Txn, Tx) -> put({txn, Txn}, Tx). - -erase_tx(Txn) -> erase({txn, Txn}). - -persistent_msg_ids(Pubs) -> - [MsgId || {#basic_message { id = MsgId, - is_persistent = true }, _MsgProps} <- Pubs]. - betas_from_index_entries(List, TransientThreshold, IndexState) -> {Filtered, Delivers, Acks} = lists:foldr( @@ -1084,8 +988,8 @@ update_rate(Now, Then, Count, {OThen, OCount}) -> %% Internal major helpers for Public API %%---------------------------------------------------------------------------- -init(IsDurable, IndexState, DeltaCount, Terms, - AsyncCallback, SyncCallback, PersistentClient, TransientClient) -> +init(IsDurable, IndexState, DeltaCount, Terms, AsyncCallback, + PersistentClient, TransientClient) -> {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState), DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount), @@ -1107,12 +1011,10 @@ init(IsDurable, IndexState, DeltaCount, Terms, ram_ack_index = gb_trees:empty(), index_state = IndexState1, msg_store_clients = {PersistentClient, TransientClient}, - on_sync = ?BLANK_SYNC, durable = IsDurable, transient_threshold = NextSeqId, async_callback = AsyncCallback, - sync_callback = SyncCallback, len = DeltaCount1, persistent_count = DeltaCount1, @@ -1141,88 +1043,6 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. -msg_store_callback(PersistentMsgIds, Pubs, AckTags, Fun, MsgPropsFun, - AsyncCallback, SyncCallback) -> - case SyncCallback(?MODULE, - fun (?MODULE, StateN) -> - tx_commit_post_msg_store(true, Pubs, AckTags, - Fun, MsgPropsFun, StateN) - end) of - ok -> ok; - error -> remove_persistent_messages(PersistentMsgIds, AsyncCallback) - end. - -remove_persistent_messages(MsgIds, AsyncCallback) -> - PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, - undefined, AsyncCallback), - ok = rabbit_msg_store:remove(MsgIds, PersistentClient), - rabbit_msg_store:client_delete_and_terminate(PersistentClient). - -tx_commit_post_msg_store(HasPersistentPubs, Pubs, AckTags, Fun, MsgPropsFun, - State = #vqstate { - on_sync = OnSync = #sync { - acks_persistent = SPAcks, - acks_all = SAcks, - pubs = SPubs, - funs = SFuns }, - pending_ack = PA, - durable = IsDurable }) -> - PersistentAcks = - case IsDurable of - true -> [AckTag || AckTag <- AckTags, - case dict:fetch(AckTag, PA) of - #msg_status {} -> - false; - {IsPersistent, _MsgId, _MsgProps} -> - IsPersistent - end]; - false -> [] - end, - case IsDurable andalso (HasPersistentPubs orelse PersistentAcks =/= []) of - true -> State #vqstate { - on_sync = #sync { - acks_persistent = [PersistentAcks | SPAcks], - acks_all = [AckTags | SAcks], - pubs = [{MsgPropsFun, Pubs} | SPubs], - funs = [Fun | SFuns] }}; - false -> State1 = tx_commit_index( - State #vqstate { - on_sync = #sync { - acks_persistent = [], - acks_all = [AckTags], - pubs = [{MsgPropsFun, Pubs}], - funs = [Fun] } }), - State1 #vqstate { on_sync = OnSync } - end. 
- -tx_commit_index(State = #vqstate { on_sync = ?BLANK_SYNC }) -> - State; -tx_commit_index(State = #vqstate { on_sync = #sync { - acks_persistent = SPAcks, - acks_all = SAcks, - pubs = SPubs, - funs = SFuns }, - durable = IsDurable }) -> - PAcks = lists:append(SPAcks), - Acks = lists:append(SAcks), - Pubs = [{Msg, Fun(MsgProps)} || {Fun, PubsN} <- lists:reverse(SPubs), - {Msg, MsgProps} <- lists:reverse(PubsN)], - {_MsgIds, State1} = ack(Acks, State), - {SeqIds, State2 = #vqstate { index_state = IndexState }} = - lists:foldl( - fun ({Msg = #basic_message { is_persistent = IsPersistent }, - MsgProps}, - {SeqIdsAcc, State3}) -> - IsPersistent1 = IsDurable andalso IsPersistent, - {SeqId, State4} = - publish(Msg, MsgProps, false, IsPersistent1, State3), - {cons_if(IsPersistent1, SeqId, SeqIdsAcc), State4} - end, {PAcks, State1}, Pubs), - IndexState1 = rabbit_queue_index:sync(SeqIds, IndexState), - [ Fun() || Fun <- lists:reverse(SFuns) ], - reduce_memory_use( - State2 #vqstate { index_state = IndexState1, on_sync = ?BLANK_SYNC }). - purge_betas_and_deltas(LensByStore, State = #vqstate { q3 = Q3, index_state = IndexState, -- cgit v1.2.1 From bee6e92332ca8f3de1f7b043e04dd2fa4eea5eab Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 24 Jun 2011 22:36:22 +0100 Subject: tx gone from everywhere --- docs/rabbitmqctl.1.xml | 12 +-- include/rabbit.hrl | 3 +- src/rabbit_amqqueue.erl | 23 ++---- src/rabbit_amqqueue_process.erl | 7 +- src/rabbit_basic.erl | 32 ++++---- src/rabbit_channel.erl | 141 ++++++-------------------------- src/rabbit_control.erl | 2 +- src/rabbit_error_logger.erl | 2 +- src/rabbit_mirror_queue_coordinator.erl | 39 --------- src/rabbit_mirror_queue_slave.erl | 10 +-- src/rabbit_queue_index.erl | 18 ++-- src/rabbit_tests.erl | 15 ++-- src/rabbit_types.erl | 6 +- 13 files changed, 67 insertions(+), 243 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index a0f03192..fdb49912 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1195,10 +1195,6 @@ vhost Virtual host in which the channel operates. - - transactional - True if the channel is in transactional mode, false otherwise. - consumer_count Number of logical AMQP consumers retrieving messages via @@ -1209,11 +1205,6 @@ Number of messages delivered via this channel but not yet acknowledged. - - acks_uncommitted - Number of acknowledgements received in an as yet - uncommitted transaction. - prefetch_count QoS prefetch count limit in force, 0 if unlimited. @@ -1239,8 +1230,7 @@ If no channelinfoitems are specified then pid, - user, transactional, consumer_count, and - messages_unacknowledged are assumed. + user, consumer_count, and messages_unacknowledged are assumed. diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 00b7e6e9..3861df2a 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -67,8 +67,7 @@ is_persistent}). -record(ssl_socket, {tcp, ssl}). --record(delivery, {mandatory, immediate, txn, sender, message, - msg_seq_no}). +-record(delivery, {mandatory, immediate, sender, message, msg_seq_no}). -record(amqp_error, {name, explanation = "", method = none}). -record(event, {type, props, timestamp}). diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index bacb1d21..4d6aaa18 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -20,12 +20,12 @@ -export([pseudo_queue/2]). 
-export([lookup/1, with/2, with_or_die/2, assert_equivalence/5, check_exclusive_access/2, with_exclusive_access_or_die/3, - stat/1, deliver/2, requeue/3, ack/4, reject/4]). + stat/1, deliver/2, requeue/3, ack/3, reject/4]). -export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]). -export([consumers/1, consumers_all/1, consumer_info_keys/0]). -export([basic_get/3, basic_consume/7, basic_cancel/4]). -export([notify_sent/2, unblock/2, flush_all/2]). --export([commit_all/3, rollback_all/3, notify_down_all/2, limit_all/3]). +-export([notify_down_all/2, limit_all/3]). -export([on_node_down/1]). -export([store_queue/1]). @@ -117,12 +117,8 @@ -spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()). -spec(deliver/2 :: (pid(), rabbit_types:delivery()) -> boolean()). -spec(requeue/3 :: (pid(), [msg_id()], pid()) -> 'ok'). --spec(ack/4 :: - (pid(), rabbit_types:maybe(rabbit_types:txn()), [msg_id()], pid()) - -> 'ok'). +-spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok'). -spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok'). --spec(commit_all/3 :: ([pid()], rabbit_types:txn(), pid()) -> ok_or_errors()). --spec(rollback_all/3 :: ([pid()], rabbit_types:txn(), pid()) -> 'ok'). -spec(notify_down_all/2 :: ([pid()], pid()) -> ok_or_errors()). -spec(limit_all/3 :: ([pid()], pid(), pid() | 'undefined') -> ok_or_errors()). -spec(basic_get/3 :: (rabbit_types:amqqueue(), pid(), boolean()) -> @@ -436,21 +432,12 @@ deliver(QPid, Delivery) -> requeue(QPid, MsgIds, ChPid) -> delegate_call(QPid, {requeue, MsgIds, ChPid}). -ack(QPid, Txn, MsgIds, ChPid) -> - delegate_cast(QPid, {ack, Txn, MsgIds, ChPid}). +ack(QPid, MsgIds, ChPid) -> + delegate_cast(QPid, {ack, MsgIds, ChPid}). reject(QPid, MsgIds, Requeue, ChPid) -> delegate_cast(QPid, {reject, MsgIds, Requeue, ChPid}). -commit_all(QPids, Txn, ChPid) -> - safe_delegate_call_ok( - fun (QPid) -> gen_server2:call(QPid, {commit, Txn, ChPid}, infinity) end, - QPids). - -rollback_all(QPids, Txn, ChPid) -> - delegate:invoke_no_result( - QPids, fun (QPid) -> gen_server2:cast(QPid, {rollback, Txn, ChPid}) end). - notify_down_all(QPids, ChPid) -> safe_delegate_call_ok( fun (QPid) -> gen_server2:call(QPid, {notify_down, ChPid}, infinity) end, diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 28fced98..87cdf925 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -495,8 +495,7 @@ run_message_queue(State) -> {_IsEmpty1, State2} = deliver_msgs_to_consumers(Funs, IsEmpty, State1), State2. -attempt_delivery(Delivery = #delivery{txn = none, - sender = ChPid, +attempt_delivery(Delivery = #delivery{sender = ChPid, message = Message, msg_seq_no = MsgSeqNo}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> @@ -801,7 +800,7 @@ prioritise_cast(Msg, _State) -> maybe_expire -> 8; drop_expired -> 8; emit_stats -> 7; - {ack, _Txn, _AckTags, _ChPid} -> 7; + {ack, _AckTags, _ChPid} -> 7; {reject, _AckTags, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; {unblock, _ChPid} -> 7; @@ -1028,7 +1027,7 @@ handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. 
noreply(deliver_or_enqueue(Delivery, State)); -handle_cast({ack, none, AckTags, ChPid}, +handle_cast({ack, AckTags, ChPid}, State = #q{backing_queue = BQ, backing_queue_state = BQS}) -> case lookup_ch(ChPid) of not_found -> diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index fa7e3a5a..ec8ed351 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -18,8 +18,8 @@ -include("rabbit.hrl"). -include("rabbit_framing.hrl"). --export([publish/1, message/3, message/4, properties/1, delivery/5]). --export([publish/4, publish/7]). +-export([publish/1, message/3, message/4, properties/1, delivery/4]). +-export([publish/4, publish/6]). -export([build_content/2, from_content/1]). %%---------------------------------------------------------------------------- @@ -37,9 +37,8 @@ -spec(publish/1 :: (rabbit_types:delivery()) -> publish_result()). --spec(delivery/5 :: - (boolean(), boolean(), rabbit_types:maybe(rabbit_types:txn()), - rabbit_types:message(), undefined | integer()) -> +-spec(delivery/4 :: + (boolean(), boolean(), rabbit_types:message(), undefined | integer()) -> rabbit_types:delivery()). -spec(message/4 :: (rabbit_exchange:name(), rabbit_router:routing_key(), @@ -53,10 +52,9 @@ -spec(publish/4 :: (exchange_input(), rabbit_router:routing_key(), properties_input(), body_input()) -> publish_result()). --spec(publish/7 :: +-spec(publish/6 :: (exchange_input(), rabbit_router:routing_key(), boolean(), boolean(), - rabbit_types:maybe(rabbit_types:txn()), properties_input(), - body_input()) -> publish_result()). + properties_input(), body_input()) -> publish_result()). -spec(build_content/2 :: (rabbit_framing:amqp_property_record(), binary() | [binary()]) -> rabbit_types:content()). -spec(from_content/1 :: (rabbit_types:content()) -> @@ -73,9 +71,9 @@ publish(Delivery = #delivery{ Other -> Other end. -delivery(Mandatory, Immediate, Txn, Message, MsgSeqNo) -> - #delivery{mandatory = Mandatory, immediate = Immediate, txn = Txn, - sender = self(), message = Message, msg_seq_no = MsgSeqNo}. +delivery(Mandatory, Immediate, Message, MsgSeqNo) -> + #delivery{mandatory = Mandatory, immediate = Immediate, sender = self(), + message = Message, msg_seq_no = MsgSeqNo}. build_content(Properties, BodyBin) when is_binary(BodyBin) -> build_content(Properties, [BodyBin]); @@ -157,19 +155,17 @@ indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. publish(Exchange, RoutingKeyBin, Properties, Body) -> - publish(Exchange, RoutingKeyBin, false, false, none, Properties, - Body). + publish(Exchange, RoutingKeyBin, false, false, Properties, Body). %% Convenience function, for avoiding round-trips in calls across the %% erlang distributed network. -publish(X = #exchange{name = XName}, RKey, Mandatory, Immediate, Txn, - Props, Body) -> - publish(X, delivery(Mandatory, Immediate, Txn, +publish(X = #exchange{name = XName}, RKey, Mandatory, Immediate, Props, Body) -> + publish(X, delivery(Mandatory, Immediate, message(XName, RKey, properties(Props), Body), undefined)); -publish(XName, RKey, Mandatory, Immediate, Txn, Props, Body) -> +publish(XName, RKey, Mandatory, Immediate, Props, Body) -> case rabbit_exchange:lookup(XName) of - {ok, X} -> publish(X, RKey, Mandatory, Immediate, Txn, Props, Body); + {ok, X} -> publish(X, RKey, Mandatory, Immediate, Props, Body); Err -> Err end. 
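%% [Editorial sketch -- not part of the patch.] With the txn argument
%% dropped, publishing through rabbit_basic takes the shape below. The
%% exchange name, routing key and payload are made up, rabbit_framing.hrl
%% is assumed to be included for #'P_basic'{}, and the return pattern
%% follows the rabbit_error_logger call further down in this patch
%% (i.e. the exchange is assumed to exist).
publish_example() ->
    XName = rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>),
    {ok, _RoutingRes, _DeliveredQPids} =
        rabbit_basic:publish(XName, <<"a.key">>, false, false,
                             #'P_basic'{content_type = <<"text/plain">>},
                             <<"hello">>),
    ok.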
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 991b0b06..36471bf5 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -30,8 +30,7 @@ prioritise_cast/2]). -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, - limiter_pid, start_limiter_fun, transaction_id, tx_participants, - next_tag, uncommitted_ack_q, unacked_message_q, + limiter_pid, start_limiter_fun, next_tag, unacked_message_q, user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq, @@ -41,12 +40,10 @@ -define(STATISTICS_KEYS, [pid, - transactional, confirm, consumer_count, messages_unacknowledged, messages_unconfirmed, - acks_uncommitted, prefetch_count, client_flow_blocked]). @@ -173,10 +170,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, conn_pid = ConnPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, - transaction_id = none, - tx_participants = sets:new(), next_tag = 1, - uncommitted_ack_q = queue:new(), unacked_message_q = queue:new(), user = User, virtual_host = VHost, @@ -331,7 +325,7 @@ handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> {hibernate, State#ch{stats_timer = StatsTimer1}}. terminate(Reason, State) -> - {Res, _State1} = rollback_and_notify(State), + {Res, _State1} = notify_queues(State), case Reason of normal -> ok = Res; shutdown -> ok = Res; @@ -386,8 +380,8 @@ send_exception(Reason, State = #ch{protocol = Protocol, rabbit_binary_generator:map_exception(Channel, Reason, Protocol), rabbit_log:error("connection ~p, channel ~p - error:~n~p~n", [ConnPid, Channel, Reason]), - %% something bad's happened: rollback_and_notify may not be 'ok' - {_Result, State1} = rollback_and_notify(State), + %% something bad's happened: notify_queues may not be 'ok' + {_Result, State1} = notify_queues(State), case CloseChannel of Channel -> ok = rabbit_writer:send_command(WriterPid, CloseMethod), {noreply, State1}; @@ -589,7 +583,7 @@ handle_method(_Method, _, State = #ch{state = closing}) -> {noreply, State}; handle_method(#'channel.close'{}, _, State = #ch{reader_pid = ReaderPid}) -> - {ok, State1} = rollback_and_notify(State), + {ok, State1} = notify_queues(State), ReaderPid ! 
{channel_closing, self()}, {noreply, State1}; @@ -601,7 +595,6 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, mandatory = Mandatory, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, - transaction_id = TxnKey, confirm_enabled = ConfirmEnabled, trace_state = TraceState}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), @@ -623,19 +616,15 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, rabbit_trace:tap_trace_in(Message, TraceState), {RoutingRes, DeliveredQPids} = rabbit_exchange:publish( - Exchange, - rabbit_basic:delivery(Mandatory, Immediate, TxnKey, Message, - MsgSeqNo)), + Exchange, rabbit_basic:delivery(Mandatory, Immediate, Message, + MsgSeqNo)), State2 = process_routing_result(RoutingRes, DeliveredQPids, ExchangeName, MsgSeqNo, Message, State1), maybe_incr_stats([{ExchangeName, 1} | [{{QPid, ExchangeName}, 1} || QPid <- DeliveredQPids]], publish, State2), - {noreply, case TxnKey of - none -> State2; - _ -> add_tx_participants(DeliveredQPids, State2) - end}; + {noreply, State2}; {error, Reason} -> rabbit_misc:protocol_error(precondition_failed, "invalid message: ~p", [Reason]) @@ -649,22 +638,12 @@ handle_method(#'basic.nack'{delivery_tag = DeliveryTag, handle_method(#'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple}, - _, State = #ch{transaction_id = TxnKey, - unacked_message_q = UAMQ}) -> + _, State = #ch{unacked_message_q = UAMQ}) -> {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple), - QIncs = ack(TxnKey, Acked), - Participants = [QPid || {QPid, _} <- QIncs], + QIncs = ack(Acked), maybe_incr_stats(QIncs, ack, State), - {noreply, case TxnKey of - none -> ok = notify_limiter(State#ch.limiter_pid, Acked), - State#ch{unacked_message_q = Remaining}; - _ -> NewUAQ = queue:join(State#ch.uncommitted_ack_q, - Acked), - add_tx_participants( - Participants, - State#ch{unacked_message_q = Remaining, - uncommitted_ack_q = NewUAQ}) - end}; + ok = notify_limiter(State#ch.limiter_pid, Acked), + {noreply, State#ch{unacked_message_q = Remaining}}; handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, @@ -1048,35 +1027,6 @@ handle_method(#'queue.purge'{queue = QueueNameBin, #'queue.purge_ok'{message_count = PurgedMessageCount}); -handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) -> - rabbit_misc:protocol_error( - precondition_failed, "cannot switch from confirm to tx mode", []); - -handle_method(#'tx.select'{}, _, State = #ch{transaction_id = none}) -> - {reply, #'tx.select_ok'{}, new_tx(State)}; - -handle_method(#'tx.select'{}, _, State) -> - {reply, #'tx.select_ok'{}, State}; - -handle_method(#'tx.commit'{}, _, #ch{transaction_id = none}) -> - rabbit_misc:protocol_error( - precondition_failed, "channel is not transactional", []); - -handle_method(#'tx.commit'{}, _, State) -> - {reply, #'tx.commit_ok'{}, internal_commit(State)}; - -handle_method(#'tx.rollback'{}, _, #ch{transaction_id = none}) -> - rabbit_misc:protocol_error( - precondition_failed, "channel is not transactional", []); - -handle_method(#'tx.rollback'{}, _, State) -> - {reply, #'tx.rollback_ok'{}, internal_rollback(State)}; - -handle_method(#'confirm.select'{}, _, #ch{transaction_id = TxId}) - when TxId =/= none -> - rabbit_misc:protocol_error( - precondition_failed, "cannot switch from tx to confirm mode", []); - handle_method(#'confirm.select'{nowait = NoWait}, _, State) -> return_ok(State#ch{confirm_enabled = true}, NoWait, #'confirm.select_ok'{}); @@ -1252,55 +1202,17 @@ collect_acks(ToAcc, PrefixAcc, Q, 
DeliveryTag, Multiple) -> precondition_failed, "unknown delivery tag ~w", [DeliveryTag]) end. -add_tx_participants(MoreP, State = #ch{tx_participants = Participants}) -> - State#ch{tx_participants = sets:union(Participants, - sets:from_list(MoreP))}. - -ack(TxnKey, UAQ) -> - fold_per_queue( - fun (QPid, MsgIds, L) -> - ok = rabbit_amqqueue:ack(QPid, TxnKey, MsgIds, self()), - [{QPid, length(MsgIds)} | L] - end, [], UAQ). - -make_tx_id() -> rabbit_guid:guid(). - -new_tx(State) -> - State#ch{transaction_id = make_tx_id(), - tx_participants = sets:new(), - uncommitted_ack_q = queue:new()}. - -internal_commit(State = #ch{transaction_id = TxnKey, - tx_participants = Participants}) -> - case rabbit_amqqueue:commit_all(sets:to_list(Participants), - TxnKey, self()) of - ok -> ok = notify_limiter(State#ch.limiter_pid, - State#ch.uncommitted_ack_q), - new_tx(State); - {error, Errors} -> rabbit_misc:protocol_error( - internal_error, "commit failed: ~w", [Errors]) - end. +ack(UAQ) -> + fold_per_queue(fun (QPid, MsgIds, L) -> + ok = rabbit_amqqueue:ack(QPid, MsgIds, self()), + [{QPid, length(MsgIds)} | L] + end, [], UAQ). -internal_rollback(State = #ch{transaction_id = TxnKey, - tx_participants = Participants, - uncommitted_ack_q = UAQ, - unacked_message_q = UAMQ}) -> - ?LOGDEBUG("rollback ~p~n - ~p acks uncommitted, ~p messages unacked~n", - [self(), - queue:len(UAQ), - queue:len(UAMQ)]), - ok = rabbit_amqqueue:rollback_all(sets:to_list(Participants), - TxnKey, self()), - NewUAMQ = queue:join(UAQ, UAMQ), - new_tx(State#ch{unacked_message_q = NewUAMQ}). - -rollback_and_notify(State = #ch{state = closing}) -> +notify_queues(State = #ch{state = closing}) -> {ok, State}; -rollback_and_notify(State = #ch{transaction_id = none}) -> - {notify_queues(State), State#ch{state = closing}}; -rollback_and_notify(State) -> - State1 = internal_rollback(State), - {notify_queues(State1), State1#ch{state = closing}}. +notify_queues(State = #ch{consumer_mapping = Consumers}) -> + {rabbit_amqqueue:notify_down_all(consumer_queues(Consumers), self()), + State#ch{state = closing}}. fold_per_queue(F, Acc0, UAQ) -> D = rabbit_misc:queue_fold( @@ -1319,9 +1231,6 @@ start_limiter(State = #ch{unacked_message_q = UAMQ, start_limiter_fun = SLF}) -> ok = limit_queues(LPid, State), LPid. -notify_queues(#ch{consumer_mapping = Consumers}) -> - rabbit_amqqueue:notify_down_all(consumer_queues(Consumers), self()). - unlimit_queues(State) -> ok = limit_queues(undefined, State), undefined. 
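%% [Editorial sketch -- not part of the patch.] ack/1 above walks the
%% collected acks and issues one rabbit_amqqueue:ack/3 call per queue,
%% carrying all of that queue's message ids. rabbit_misc:queue_fold/3 is
%% assumed to behave like a FIFO fold over the queue's elements, i.e.
%% roughly:
queue_fold_sketch(Fun, Acc0, Q) ->
    lists:foldl(Fun, Acc0, queue:to_list(Q)).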
@@ -1436,17 +1345,13 @@ i(connection, #ch{conn_pid = ConnPid}) -> ConnPid; i(number, #ch{channel = Channel}) -> Channel; i(user, #ch{user = User}) -> User#user.username; i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{transaction_id = TxnKey}) -> TxnKey =/= none; i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> gb_trees:size(UMQ); -i(messages_unacknowledged, #ch{unacked_message_q = UAMQ, - uncommitted_ack_q = UAQ}) -> - queue:len(UAMQ) + queue:len(UAQ); -i(acks_uncommitted, #ch{uncommitted_ack_q = UAQ}) -> - queue:len(UAQ); +i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> + queue:len(UAMQ); i(prefetch_count, #ch{limiter_pid = LimiterPid}) -> rabbit_limiter:get_limit(LimiterPid); i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) -> diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 9eef384a..6eb1aaba 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -301,7 +301,7 @@ action(list_connections, Node, Args, _Opts, Inform) -> action(list_channels, Node, Args, _Opts, Inform) -> Inform("Listing channels", []), - ArgAtoms = default_if_empty(Args, [pid, user, transactional, consumer_count, + ArgAtoms = default_if_empty(Args, [pid, user, consumer_count, messages_unacknowledged]), display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]), ArgAtoms); diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl index 3fb0817a..93aad9e3 100644 --- a/src/rabbit_error_logger.erl +++ b/src/rabbit_error_logger.erl @@ -71,7 +71,7 @@ publish1(RoutingKey, Format, Data, LogExch) -> %% second resolution, not millisecond. Timestamp = rabbit_misc:now_ms() div 1000, {ok, _RoutingRes, _DeliveredQPids} = - rabbit_basic:publish(LogExch, RoutingKey, false, false, none, + rabbit_basic:publish(LogExch, RoutingKey, false, false, #'P_basic'{content_type = <<"text/plain">>, timestamp = Timestamp}, list_to_binary(io_lib:format(Format, Data))), diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 2727c1d0..d1c3a2e5 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -254,45 +254,6 @@ %% sender_death message. The slave will then be able to tidy up its %% state as normal. %% -%% We don't support transactions on mirror queues. To do so is -%% challenging. The underlying bq is free to add the contents of the -%% txn to the queue proper at any point after the tx.commit comes in -%% but before the tx.commit-ok goes out. This means that it is not -%% safe for all mirrors to simply issue the bq:tx_commit at the same -%% time, as the addition of the txn's contents to the queue may -%% subsequently be inconsistently interwoven with other actions on the -%% bq. The solution to this is, in the master, wrap the PostCommitFun -%% and do the gm:broadcast in there: at that point, you're in the bq -%% (well, there's actually nothing to stop that function being invoked -%% by some other process, but let's pretend for now: you could always -%% use run_backing_queue to ensure you really are in the queue process -%% (the _async variant would be unsafe from an ordering pov)), the -%% gm:broadcast is safe because you don't have to worry about races -%% with other gm:broadcast calls (same process). 
Thus this signal -%% would indicate sufficiently to all the slaves that they must insert -%% the complete contents of the txn at precisely this point in the -%% stream of events. -%% -%% However, it's quite difficult for the slaves to make that happen: -%% they would be forced to issue the bq:tx_commit at that point, but -%% then stall processing any further instructions from gm until they -%% receive the notification from their bq that the tx_commit has fully -%% completed (i.e. they need to treat what is an async system as being -%% fully synchronous). This is not too bad (apart from the -%% vomit-inducing notion of it all): just need a queue of instructions -%% from the GM; but then it gets rather worse when you consider what -%% needs to happen if the master dies at this point and the slave in -%% the middle of this tx_commit needs to be promoted. -%% -%% Finally, we can't possibly hope to make transactions atomic across -%% mirror queues, and it's not even clear that that's desirable: if a -%% slave fails whilst there's an open transaction in progress then -%% when the channel comes to commit the txn, it will detect the -%% failure and destroy the channel. However, the txn will have -%% actually committed successfully in all the other mirrors (including -%% master). To do this bit properly would require 2PC and all the -%% baggage that goes with that. -%% %% Recovery of mirrored queues is straightforward: as nodes die, the %% remaining nodes record this, and eventually a situation is reached %% in which only one node is alive, which is the master. This is the diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 55d61d41..66ff575f 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -488,7 +488,7 @@ promote_me(From, #state { q = Q, %% %% Everything that's in MA gets requeued. Consequently the new %% master should start with a fresh AM as there are no messages - %% pending acks (txns will have been rolled back). + %% pending acks. MSList = dict:to_list(MS), SS = dict:from_list( @@ -612,8 +612,7 @@ confirm_sender_death(Pid) -> maybe_enqueue_message( Delivery = #delivery { message = #basic_message { id = MsgId }, msg_seq_no = MsgSeqNo, - sender = ChPid, - txn = none }, + sender = ChPid }, EnqueueOnPromotion, State = #state { sender_queues = SQ, msg_id_status = MS }) -> State1 = ensure_monitoring(ChPid, State), @@ -655,10 +654,7 @@ maybe_enqueue_message( SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ), State1 #state { msg_id_status = dict:erase(MsgId, MS), sender_queues = SQ1 } - end; -maybe_enqueue_message(_Delivery, _EnqueueOnPromotion, State) -> - %% We don't support txns in mirror queues. - State. + end. get_sender_queue(ChPid, SQ) -> case dict:find(ChPid, SQ) of diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl index aaf3df78..bf89cdb2 100644 --- a/src/rabbit_queue_index.erl +++ b/src/rabbit_queue_index.erl @@ -76,11 +76,10 @@ %% the segment file combined with the journal, no writing needs to be %% done to the segment file either (in fact it is deleted if it exists %% at all). This is safe given that the set of acks is a subset of the -%% set of publishes. When it's necessary to sync messages because of -%% transactions, it's only necessary to fsync on the journal: when -%% entries are distributed from the journal to segment files, those -%% segments appended to are fsync'd prior to the journal being -%% truncated. +%% set of publishes. 
When it is necessary to sync messages, it is +%% sufficient to fsync on the journal: when entries are distributed +%% from the journal to segment files, those segments appended to are +%% fsync'd prior to the journal being truncated. %% %% This module is also responsible for scanning the queue index files %% and seeding the message store on start up. @@ -289,14 +288,13 @@ sync(State = #qistate { unsynced_msg_ids = MsgIds }) -> sync_if([] =/= MsgIds, State). sync(SeqIds, State) -> - %% The SeqIds here contains the SeqId of every publish and ack in - %% the transaction. Ideally we should go through these seqids and - %% only sync the journal if the pubs or acks appear in the + %% The SeqIds here contains the SeqId of every publish and ack to + %% be sync'ed. Ideally we should go through these seqids and only + %% sync the journal if the pubs or acks appear in the %% journal. However, this would be complex to do, and given that %% the variable queue publishes and acks to the qi, and then %% syncs, all in one operation, there is no possibility of the - %% seqids not being in the journal, provided the transaction isn't - %% emptied (handled by sync_if anyway). + %% seqids not being in the journal. sync_if([] =/= SeqIds, State). flush(State = #qistate { dirty_count = 0 }) -> State; diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 6e44c7a0..bc1b00b2 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -705,7 +705,6 @@ test_topic_expect_match(X, List) -> Res = rabbit_exchange_type_topic:route( X, #delivery{mandatory = false, immediate = false, - txn = none, sender = self(), message = Message}), ExpectedRes = lists:map( @@ -1669,8 +1668,8 @@ test_backing_queue() -> passed = test_queue_index(), passed = test_queue_index_props(), passed = test_variable_queue(), - %% FIXME: replace the use of tx in these with confirms - %% passed = test_variable_queue_delete_msg_store_files_callback(), + passed = test_variable_queue_delete_msg_store_files_callback(), + %% FIXME: re-enable once fixed %% passed = test_queue_recover(), application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), @@ -2319,17 +2318,16 @@ test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) -> test_queue_recover() -> Count = 2 * rabbit_queue_index:next_segment_boundary(0), - TxID = rabbit_guid:guid(), {new, #amqqueue { pid = QPid, name = QName } = Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), [begin Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), <<>>, #'P_basic'{delivery_mode = 2}, <<>>), - Delivery = #delivery{mandatory = false, immediate = false, txn = TxID, + Delivery = #delivery{mandatory = false, immediate = false, sender = self(), message = Msg}, true = rabbit_amqqueue:deliver(QPid, Delivery) end || _ <- lists:seq(1, Count)], - rabbit_amqqueue:commit_all([QPid], TxID, self()), + %% FIXME: wait for confirms of all publishes exit(QPid, kill), MRef = erlang:monitor(process, QPid), receive {'DOWN', MRef, process, QPid, _Info} -> ok @@ -2356,18 +2354,17 @@ test_variable_queue_delete_msg_store_files_callback() -> ok = restart_msg_store_empty(), {new, #amqqueue { pid = QPid, name = QName } = Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), - TxID = rabbit_guid:guid(), Payload = <<0:8388608>>, %% 1MB Count = 30, [begin Msg = rabbit_basic:message( rabbit_misc:r(<<>>, exchange, <<>>), <<>>, #'P_basic'{delivery_mode = 2}, Payload), - Delivery = #delivery{mandatory = false, immediate = false, txn = TxID, + Delivery = #delivery{mandatory 
= false, immediate = false, sender = self(), message = Msg}, true = rabbit_amqqueue:deliver(QPid, Delivery) end || _ <- lists:seq(1, Count)], - rabbit_amqqueue:commit_all([QPid], TxID, self()), + %% FIXME: wait for confirms of all publishes rabbit_amqqueue:set_ram_duration_target(QPid, 0), CountMinusOne = Count - 1, diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl index 03b2c9e8..2db960ac 100644 --- a/src/rabbit_types.erl +++ b/src/rabbit_types.erl @@ -20,7 +20,7 @@ -ifdef(use_specs). --export_type([txn/0, maybe/1, info/0, infos/0, info_key/0, info_keys/0, +-export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0, message/0, msg_id/0, basic_message/0, delivery/0, content/0, decoded_content/0, undecoded_content/0, unencoded_content/0, encoded_content/0, message_properties/0, @@ -73,16 +73,12 @@ -type(delivery() :: #delivery{mandatory :: boolean(), immediate :: boolean(), - txn :: maybe(txn()), sender :: pid(), message :: message()}). -type(message_properties() :: #message_properties{expiry :: pos_integer() | 'undefined', needs_confirming :: boolean()}). -%% this is really an abstract type, but dialyzer does not support them --type(txn() :: rabbit_guid:guid()). - -type(info_key() :: atom()). -type(info_keys() :: [info_key()]). -- cgit v1.2.1 From 837bbfce10e9ea2fdcb64cab40a9e43a72c5db79 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 07:03:13 +0100 Subject: fix tests --- src/rabbit_tests.erl | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index bc1b00b2..033b65a0 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1669,8 +1669,7 @@ test_backing_queue() -> passed = test_queue_index_props(), passed = test_variable_queue(), passed = test_variable_queue_delete_msg_store_files_callback(), - %% FIXME: re-enable once fixed - %% passed = test_queue_recover(), + passed = test_queue_recover(), application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), passed; @@ -2129,6 +2128,29 @@ with_fresh_variable_queue(Fun) -> _ = rabbit_variable_queue:delete_and_terminate(shutdown, Fun(VQ)), passed. +publish_and_confirm(QPid, Payload, Count) -> + Seqs = lists:seq(1, Count), + [begin + Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{delivery_mode = 2}, + Payload), + Delivery = #delivery{mandatory = false, immediate = false, + sender = self(), message = Msg, msg_seq_no = Seq}, + true = rabbit_amqqueue:deliver(QPid, Delivery) + end || Seq <- Seqs], + wait_for_confirms(gb_sets:from_list(Seqs)). + +wait_for_confirms(Unconfirmed) -> + case gb_sets:is_empty(Unconfirmed) of + true -> ok; + false -> receive {'$gen_cast', {confirm, Confirmed, _}} -> + wait_for_confirms( + gb_sets:difference(Unconfirmed, + gb_sets:from_list(Confirmed))) + after 1000 -> exit(timeout_waiting_for_confirm) + end + end. 
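%% What the two helpers above rely on (illustrative note): every publish in
%% these tests now carries a msg_seq_no, and the queue casts a
%% {confirm, MsgSeqNos, _} message back to the publishing process once the
%% messages have been handled, which wait_for_confirms/1 collects via
%% gb_sets until nothing is outstanding (or the one-second timeout fires).
%% A typical call, assuming QPid is a freshly declared durable test queue
%% and the payload and count are arbitrary:
%%
%%     ok = publish_and_confirm(QPid, <<"payload">>, 100).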
+ test_variable_queue() -> [passed = with_fresh_variable_queue(F) || F <- [fun test_variable_queue_dynamic_duration_change/1, @@ -2320,14 +2342,8 @@ test_queue_recover() -> Count = 2 * rabbit_queue_index:next_segment_boundary(0), {new, #amqqueue { pid = QPid, name = QName } = Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), - [begin - Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>), - <<>>, #'P_basic'{delivery_mode = 2}, <<>>), - Delivery = #delivery{mandatory = false, immediate = false, - sender = self(), message = Msg}, - true = rabbit_amqqueue:deliver(QPid, Delivery) - end || _ <- lists:seq(1, Count)], - %% FIXME: wait for confirms of all publishes + publish_and_confirm(QPid, <<>>, Count), + exit(QPid, kill), MRef = erlang:monitor(process, QPid), receive {'DOWN', MRef, process, QPid, _Info} -> ok @@ -2356,15 +2372,8 @@ test_variable_queue_delete_msg_store_files_callback() -> rabbit_amqqueue:declare(test_queue(), true, false, [], none), Payload = <<0:8388608>>, %% 1MB Count = 30, - [begin - Msg = rabbit_basic:message( - rabbit_misc:r(<<>>, exchange, <<>>), - <<>>, #'P_basic'{delivery_mode = 2}, Payload), - Delivery = #delivery{mandatory = false, immediate = false, - sender = self(), message = Msg}, - true = rabbit_amqqueue:deliver(QPid, Delivery) - end || _ <- lists:seq(1, Count)], - %% FIXME: wait for confirms of all publishes + publish_and_confirm(QPid, Payload, Count), + rabbit_amqqueue:set_ram_duration_target(QPid, 0), CountMinusOne = Count - 1, -- cgit v1.2.1 From 52d694ca254671430d325204b1a6ac052377445b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sat, 25 Jun 2011 12:14:26 +0100 Subject: Correct bug. This was introduced in bug 24116. --- src/rabbit_variable_queue.erl | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index a167cca0..e1c78307 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -576,10 +576,16 @@ dropwhile1(Pred, State) -> in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> - true = queue:is_empty(Q4), %% ASSERTION - State #vqstate { - q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), - ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; + case queue:is_empty(Q4) of + true -> + State #vqstate { + q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; + false -> + {MsgStatus1, State1 = #vqstate { q4 = Q4a }} = + read_msg(MsgStatus, State), + State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4a) } + end; in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. 
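%% Context for the fix above (illustrative): the previous clause asserted
%% that q4 was empty and failed with a badmatch otherwise. When q4 is not
%% empty, the message that dropwhile examined but did not drop has to be
%% read back into RAM (q4 only holds messages whose content is present) and
%% returned to the *front* of q4, which is exactly what queue:in_r/2 does:
%%
%%     Q0 = queue:from_list([b, c]),
%%     Q1 = queue:in_r(a, Q0),          %% 'a' becomes the new head
%%     [a, b, c] = queue:to_list(Q1),
%%     Q2 = queue:in(d, Q1),            %% in/2 appends at the rear
%%     [a, b, c, d] = queue:to_list(Q2).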
-- cgit v1.2.1 From ab045589340e66916cc7be820dc36aa2970f1ec2 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 14:47:32 +0100 Subject: minor refactoring and simplification --- src/rabbit_variable_queue.erl | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index e1c78307..fd7bf2cc 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -577,14 +577,11 @@ dropwhile1(Pred, State) -> in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> case queue:is_empty(Q4) of - true -> - State #vqstate { - q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), - ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; - false -> - {MsgStatus1, State1 = #vqstate { q4 = Q4a }} = - read_msg(MsgStatus, State), - State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4a) } + true -> State #vqstate { + q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; + false -> {MsgStatus1, State1} = read_msg(MsgStatus, State), + State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4) } end; in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. -- cgit v1.2.1 From 95f25372cf59874c0949db00f9841c17374be0f2 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 15:17:49 +0100 Subject: refactor 'dropwhile' and 'fetch' - rename 'internal_queue_out' to 'queue_out' and turn it from 2nd to 1st order, thus making it more analogous to queue:out - only assert state invariants at public API return points, not inside helper functions - inline 'dropwhile1' into 'dropwhile' --- src/rabbit_variable_queue.erl | 60 +++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index fd7bf2cc..c838e2d7 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -560,19 +560,16 @@ drain_confirmed(State = #vqstate { confirmed = C }) -> {gb_sets:to_list(C), State #vqstate { confirmed = gb_sets:new() }}. dropwhile(Pred, State) -> - {_OkOrEmpty, State1} = dropwhile1(Pred, State), - a(State1). - -dropwhile1(Pred, State) -> - internal_queue_out( - fun(MsgStatus = #msg_status { msg_props = MsgProps }, State1) -> - case Pred(MsgProps) of - true -> {_, State2} = internal_fetch(false, MsgStatus, - State1), - dropwhile1(Pred, State2); - false -> {ok, in_r(MsgStatus, State1)} - end - end, State). + case queue_out(State) of + {empty, State1} -> + a(State1); + {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} -> + case Pred(MsgProps) of + true -> {_, State2} = internal_fetch(false, MsgStatus, State1), + dropwhile(Pred, State2); + false -> a(in_r(MsgStatus, State1)) + end + end. in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> @@ -587,23 +584,26 @@ in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. fetch(AckRequired, State) -> - internal_queue_out( - fun(MsgStatus, State1) -> - %% it's possible that the message wasn't read from disk - %% at this point, so read it in. - {MsgStatus1, State2} = read_msg(MsgStatus, State1), - internal_fetch(AckRequired, MsgStatus1, State2) - end, State). 
- -internal_queue_out(Fun, State = #vqstate { q4 = Q4 }) -> + case queue_out(State) of + {empty, State1} -> + {empty, a(State1)}; + {{value, MsgStatus}, State1} -> + %% it is possible that the message wasn't read from disk + %% at this point, so read it in. + {MsgStatus1, State2} = read_msg(MsgStatus, State1), + {Res, State3} = internal_fetch(AckRequired, MsgStatus1, State2), + {Res, a(State3)} + end. + +queue_out(State = #vqstate { q4 = Q4 }) -> case queue:out(Q4) of {empty, _Q4} -> case fetch_from_q3(State) of - {empty, State1} = Result -> a(State1), Result; - {loaded, {MsgStatus, State1}} -> Fun(MsgStatus, State1) + {empty, _State1} = Result -> Result; + {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} end; {{value, MsgStatus}, Q4a} -> - Fun(MsgStatus, State #vqstate { q4 = Q4a }) + {{value, MsgStatus}, State #vqstate { q4 = Q4a }} end. read_msg(MsgStatus = #msg_status { msg = undefined, @@ -665,11 +665,11 @@ internal_fetch(AckRequired, MsgStatus = #msg_status { RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined), {{Msg, IsDelivered, AckTag, Len1}, - a(State1 #vqstate { ram_msg_count = RamMsgCount1, - out_counter = OutCount + 1, - index_state = IndexState2, - len = Len1, - persistent_count = PCount1 })}. + State1 #vqstate { ram_msg_count = RamMsgCount1, + out_counter = OutCount + 1, + index_state = IndexState2, + len = Len1, + persistent_count = PCount1 }}. ack(AckTags, State) -> {MsgIds, State1} = ack(fun msg_store_remove/3, -- cgit v1.2.1 From db26ed9e1f432756c4906b371f5ae9f88d5106ec Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 15:31:10 +0100 Subject: cosmetic: make order of exports match order in behaviour --- src/rabbit_variable_queue.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index c838e2d7..b8fbf140 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -18,8 +18,9 @@ -export([init/4, terminate/2, delete_and_terminate/2, purge/1, publish/4, publish_delivered/5, drain_confirmed/1, - fetch/2, ack/2, tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, - requeue/3, len/1, is_empty/1, dropwhile/2, + dropwhile/2, fetch/2, ack/2, + tx_publish/5, tx_ack/3, tx_rollback/2, tx_commit/4, + requeue/3, len/1, is_empty/1, set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1, handle_pre_hibernate/1, status/1, invoke/3, is_duplicate/3, discard/3, -- cgit v1.2.1 From 4a41e885f65c9db368166c8dae2777af6dc57993 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 15:31:27 +0100 Subject: cosmetic: move helpers to where they belong --- src/rabbit_variable_queue.erl | 176 +++++++++++++++++++++--------------------- 1 file changed, 88 insertions(+), 88 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index b8fbf140..e997bb61 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -572,18 +572,6 @@ dropwhile(Pred, State) -> end end. 
-in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, - State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> - case queue:is_empty(Q4) of - true -> State #vqstate { - q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), - ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; - false -> {MsgStatus1, State1} = read_msg(MsgStatus, State), - State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4) } - end; -in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> - State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. - fetch(AckRequired, State) -> case queue_out(State) of {empty, State1} -> @@ -596,82 +584,6 @@ fetch(AckRequired, State) -> {Res, a(State3)} end. -queue_out(State = #vqstate { q4 = Q4 }) -> - case queue:out(Q4) of - {empty, _Q4} -> - case fetch_from_q3(State) of - {empty, _State1} = Result -> Result; - {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} - end; - {{value, MsgStatus}, Q4a} -> - {{value, MsgStatus}, State #vqstate { q4 = Q4a }} - end. - -read_msg(MsgStatus = #msg_status { msg = undefined, - msg_id = MsgId, - is_persistent = IsPersistent }, - State = #vqstate { ram_msg_count = RamMsgCount, - msg_store_clients = MSCState}) -> - {{ok, Msg = #basic_message {}}, MSCState1} = - msg_store_read(MSCState, IsPersistent, MsgId), - {MsgStatus #msg_status { msg = Msg }, - State #vqstate { ram_msg_count = RamMsgCount + 1, - msg_store_clients = MSCState1 }}; -read_msg(MsgStatus, State) -> - {MsgStatus, State}. - -internal_fetch(AckRequired, MsgStatus = #msg_status { - seq_id = SeqId, - msg_id = MsgId, - msg = Msg, - is_persistent = IsPersistent, - is_delivered = IsDelivered, - msg_on_disk = MsgOnDisk, - index_on_disk = IndexOnDisk }, - State = #vqstate {ram_msg_count = RamMsgCount, - out_counter = OutCount, - index_state = IndexState, - msg_store_clients = MSCState, - len = Len, - persistent_count = PCount }) -> - %% 1. Mark it delivered if necessary - IndexState1 = maybe_write_delivered( - IndexOnDisk andalso not IsDelivered, - SeqId, IndexState), - - %% 2. Remove from msg_store and queue index, if necessary - Rem = fun () -> - ok = msg_store_remove(MSCState, IsPersistent, [MsgId]) - end, - Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, - IndexState2 = - case {AckRequired, MsgOnDisk, IndexOnDisk, IsPersistent} of - {false, true, false, _} -> Rem(), IndexState1; - {false, true, true, _} -> Rem(), Ack(); - { true, true, true, false} -> Ack(); - _ -> IndexState1 - end, - - %% 3. If an ack is required, add something sensible to PA - {AckTag, State1} = case AckRequired of - true -> StateN = record_pending_ack( - MsgStatus #msg_status { - is_delivered = true }, State), - {SeqId, StateN}; - false -> {undefined, State} - end, - - PCount1 = PCount - one_if(IsPersistent andalso not AckRequired), - Len1 = Len - 1, - RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined), - - {{Msg, IsDelivered, AckTag, Len1}, - State1 #vqstate { ram_msg_count = RamMsgCount1, - out_counter = OutCount + 1, - index_state = IndexState2, - len = Len1, - persistent_count = PCount1 }}. - ack(AckTags, State) -> {MsgIds, State1} = ack(fun msg_store_remove/3, fun (_, State0) -> State0 end, @@ -1145,6 +1057,94 @@ blank_rate(Timestamp, IngressLength) -> avg_ingress = 0.0, timestamp = Timestamp }. 
+in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, + State = #vqstate { q3 = Q3, q4 = Q4, ram_index_count = RamIndexCount }) -> + case queue:is_empty(Q4) of + true -> State #vqstate { + q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), + ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; + false -> {MsgStatus1, State1} = read_msg(MsgStatus, State), + State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4) } + end; +in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> + State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. + +queue_out(State = #vqstate { q4 = Q4 }) -> + case queue:out(Q4) of + {empty, _Q4} -> + case fetch_from_q3(State) of + {empty, _State1} = Result -> Result; + {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} + end; + {{value, MsgStatus}, Q4a} -> + {{value, MsgStatus}, State #vqstate { q4 = Q4a }} + end. + +read_msg(MsgStatus = #msg_status { msg = undefined, + msg_id = MsgId, + is_persistent = IsPersistent }, + State = #vqstate { ram_msg_count = RamMsgCount, + msg_store_clients = MSCState}) -> + {{ok, Msg = #basic_message {}}, MSCState1} = + msg_store_read(MSCState, IsPersistent, MsgId), + {MsgStatus #msg_status { msg = Msg }, + State #vqstate { ram_msg_count = RamMsgCount + 1, + msg_store_clients = MSCState1 }}; +read_msg(MsgStatus, State) -> + {MsgStatus, State}. + +internal_fetch(AckRequired, MsgStatus = #msg_status { + seq_id = SeqId, + msg_id = MsgId, + msg = Msg, + is_persistent = IsPersistent, + is_delivered = IsDelivered, + msg_on_disk = MsgOnDisk, + index_on_disk = IndexOnDisk }, + State = #vqstate {ram_msg_count = RamMsgCount, + out_counter = OutCount, + index_state = IndexState, + msg_store_clients = MSCState, + len = Len, + persistent_count = PCount }) -> + %% 1. Mark it delivered if necessary + IndexState1 = maybe_write_delivered( + IndexOnDisk andalso not IsDelivered, + SeqId, IndexState), + + %% 2. Remove from msg_store and queue index, if necessary + Rem = fun () -> + ok = msg_store_remove(MSCState, IsPersistent, [MsgId]) + end, + Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end, + IndexState2 = + case {AckRequired, MsgOnDisk, IndexOnDisk, IsPersistent} of + {false, true, false, _} -> Rem(), IndexState1; + {false, true, true, _} -> Rem(), Ack(); + { true, true, true, false} -> Ack(); + _ -> IndexState1 + end, + + %% 3. If an ack is required, add something sensible to PA + {AckTag, State1} = case AckRequired of + true -> StateN = record_pending_ack( + MsgStatus #msg_status { + is_delivered = true }, State), + {SeqId, StateN}; + false -> {undefined, State} + end, + + PCount1 = PCount - one_if(IsPersistent andalso not AckRequired), + Len1 = Len - 1, + RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined), + + {{Msg, IsDelivered, AckTag, Len1}, + State1 #vqstate { ram_msg_count = RamMsgCount1, + out_counter = OutCount + 1, + index_state = IndexState2, + len = Len1, + persistent_count = PCount1 }}. + msg_store_callback(PersistentMsgIds, Pubs, AckTags, Fun, MsgPropsFun, AsyncCallback, SyncCallback) -> case SyncCallback(?MODULE, -- cgit v1.2.1 From de668242d321f4ce7d186f7cfa2ce67a5813c878 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 18:23:20 +0100 Subject: get rid of sync run_backing_queue it was only needed for tx Also remove some tx remnants from mirror_queue_slave. 
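With the tx.commit path gone there is no caller left that needs to block on
the backing queue, so only the asynchronous variant survives (it is renamed
to run_backing_queue in the following commit). For reference, the two shapes
involved, both taken from this series:

    %% removed: synchronous, only needed by the old tx code
    gen_server2:call(QPid, {run_backing_queue, Mod, Fun}, infinity)

    %% kept: asynchronous
    gen_server2:cast(QPid, {run_backing_queue, Mod, Fun})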
--- src/rabbit_amqqueue.erl | 9 +-------- src/rabbit_amqqueue_process.erl | 7 +------ src/rabbit_mirror_queue_slave.erl | 15 ++------------- 3 files changed, 4 insertions(+), 27 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 4d6aaa18..d9101bb1 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -31,8 +31,7 @@ %% internal --export([internal_declare/2, internal_delete/1, - run_backing_queue/3, run_backing_queue_async/3, +-export([internal_declare/2, internal_delete/1, run_backing_queue_async/3, sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1, emit_stats/1]). @@ -140,9 +139,6 @@ rabbit_types:connection_exit() | fun (() -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). --spec(run_backing_queue/3 :: - (pid(), atom(), - (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(run_backing_queue_async/3 :: (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). @@ -487,9 +483,6 @@ internal_delete(QueueName) -> end end). -run_backing_queue(QPid, Mod, Fun) -> - gen_server2:call(QPid, {run_backing_queue, Mod, Fun}, infinity). - run_backing_queue_async(QPid, Mod, Fun) -> gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 87cdf925..28e0db87 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -787,7 +787,6 @@ prioritise_call(Msg, _From, _State) -> info -> 9; {info, _Items} -> 9; consumers -> 9; - {run_backing_queue, _Mod, _Fun} -> 6; _ -> 0 end. @@ -1011,11 +1010,7 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> ChAckTags1 = subtract_acks(ChAckTags, AckTags), maybe_store_ch_record(C#cr{acktags = ChAckTags1}), noreply(requeue_and_run(AckTags, State)) - end; - -handle_call({run_backing_queue, Mod, Fun}, _From, State) -> - reply(ok, run_backing_queue(Mod, Fun, State)). - + end. handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 66ff575f..03a0dbbe 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -167,14 +167,7 @@ handle_call({gm_deaths, Deaths}, From, {error, not_found} -> gen_server2:reply(From, ok), {stop, normal, State} - end; - -handle_call({run_backing_queue, Mod, Fun}, _From, State) -> - reply(ok, run_backing_queue(Mod, Fun, State)); - -handle_call({commit, _Txn, _ChPid}, _From, State) -> - %% We don't support transactions in mirror queues - reply(ok, State). + end. handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); @@ -208,11 +201,7 @@ handle_cast(update_ram_duration, handle_cast(sync_timeout, State) -> noreply(backing_queue_timeout( - State #state { sync_timer_ref = undefined })); - -handle_cast({rollback, _Txn, _ChPid}, State) -> - %% We don't support transactions in mirror queues - noreply(State). + State #state { sync_timer_ref = undefined })). 
handle_info(timeout, State) -> noreply(backing_queue_timeout(State)); -- cgit v1.2.1 From 907bdbfb4bd9a099882379cfd09e31f2672d772b Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 18:34:23 +0100 Subject: s/run_backing_queue_async/run_backing_queue as well as removing some mroe tx remnants from mirror_queue_slave --- src/rabbit_amqqueue.erl | 6 +++--- src/rabbit_amqqueue_process.erl | 2 +- src/rabbit_mirror_queue_coordinator.erl | 2 +- src/rabbit_mirror_queue_master.erl | 2 +- src/rabbit_mirror_queue_slave.erl | 12 ++---------- 5 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index d9101bb1..e9d01d12 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -31,7 +31,7 @@ %% internal --export([internal_declare/2, internal_delete/1, run_backing_queue_async/3, +-export([internal_declare/2, internal_delete/1, run_backing_queue/3, sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, set_maximum_since_use/2, maybe_expire/1, drop_expired/1, emit_stats/1]). @@ -139,7 +139,7 @@ rabbit_types:connection_exit() | fun (() -> rabbit_types:ok_or_error('not_found') | rabbit_types:connection_exit())). --spec(run_backing_queue_async/3 :: +-spec(run_backing_queue/3 :: (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). @@ -483,7 +483,7 @@ internal_delete(QueueName) -> end end). -run_backing_queue_async(QPid, Mod, Fun) -> +run_backing_queue(QPid, Mod, Fun) -> gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). sync_timeout(QPid) -> diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 28e0db87..3e2bbf8d 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -192,7 +192,7 @@ bq_init(BQ, Q, Recover) -> Self = self(), BQ:init(Q, Recover, fun (Mod, Fun) -> - rabbit_amqqueue:run_backing_queue_async(Self, Mod, Fun) + rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) end). process_args(State = #q{q = #amqqueue{arguments = Arguments}}) -> diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index d1c3a2e5..4906937b 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -70,7 +70,7 @@ %% group. Because the master is the bq of amqqueue_process, it doesn't %% have sole control over its mailbox, and as a result, the master %% itself cannot be passed messages directly (well, it could by via -%% the amqqueue:run_backing_queue_async callback but that would induce +%% the amqqueue:run_backing_queue callback but that would induce %% additional unnecessary loading on the master queue process), yet it %% needs to react to gm events, such as the death of slaves. 
Thus the %% master creates the coordinator, and it is the coordinator that is diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 082730e0..9e0ffb13 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -61,7 +61,7 @@ stop() -> sender_death_fun() -> Self = self(), fun (DeadPid) -> - rabbit_amqqueue:run_backing_queue_async( + rabbit_amqqueue:run_backing_queue( Self, ?MODULE, fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> ok = gm:broadcast(GM, {sender_death, DeadPid}), diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 03a0dbbe..93340ba8 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -260,7 +260,6 @@ handle_pre_hibernate(State = #state { backing_queue = BQ, prioritise_call(Msg, _From, _State) -> case Msg of - {run_backing_queue, _Mod, _Fun} -> 6; {gm_deaths, _Deaths} -> 5; _ -> 0 end. @@ -320,14 +319,7 @@ bq_init(BQ, Q, Recover) -> Self = self(), BQ:init(Q, Recover, fun (Mod, Fun) -> - rabbit_amqqueue:run_backing_queue_async(Self, Mod, Fun) - end, - fun (Mod, Fun) -> - rabbit_misc:with_exit_handler( - fun () -> error end, - fun () -> - rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) - end) + rabbit_amqqueue:run_backing_queue(Self, Mod, Fun) end). run_backing_queue(rabbit_mirror_queue_master, Fun, State) -> @@ -594,7 +586,7 @@ confirm_sender_death(Pid) -> %% Note that we do not remove our knowledge of this ChPid until we %% get the sender_death from GM. {ok, _TRef} = timer:apply_after( - ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue_async, + ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue, [self(), rabbit_mirror_queue_master, Fun]), ok. -- cgit v1.2.1 From c6b0630ca1249f1a2bf4210feec2c788550ee2a4 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 25 Jun 2011 22:07:40 +0100 Subject: remember when we've refused a queue's can_send due to being blocked so that we can tell the queue when we get unblocked --- src/rabbit_limiter.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index 1b72dd76..e79583fa 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -120,9 +120,9 @@ init([ChPid, UnackedMsgCount]) -> prioritise_call(get_limit, _From, _State) -> 9; prioritise_call(_Msg, _From, _State) -> 0. 
-handle_call({can_send, _QPid, _AckRequired}, _From, +handle_call({can_send, QPid, _AckRequired}, _From, State = #lim{blocked = true}) -> - {reply, false, State}; + {reply, false, limit_queue(QPid, State)}; handle_call({can_send, QPid, AckRequired}, _From, State = #lim{volume = Volume}) -> case limit_reached(State) of -- cgit v1.2.1 From 4c35f942c568abcf138e7b4e3356a149c240a983 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sun, 26 Jun 2011 08:52:59 +0100 Subject: cosmetic --- src/rabbit_variable_queue.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index e997bb61..e9d302f1 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1073,7 +1073,7 @@ queue_out(State = #vqstate { q4 = Q4 }) -> case queue:out(Q4) of {empty, _Q4} -> case fetch_from_q3(State) of - {empty, _State1} = Result -> Result; + {empty, _State1} = Result -> Result; {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1} end; {{value, MsgStatus}, Q4a} -> -- cgit v1.2.1 From 1f91b16ad30f22175eeb8a77e4a6b5d33d888698 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sun, 26 Jun 2011 10:06:05 +0100 Subject: meh --- src/rabbit_variable_queue.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl index e9d302f1..c6d99deb 100644 --- a/src/rabbit_variable_queue.erl +++ b/src/rabbit_variable_queue.erl @@ -1063,8 +1063,9 @@ in_r(MsgStatus = #msg_status { msg = undefined, index_on_disk = IndexOnDisk }, true -> State #vqstate { q3 = bpqueue:in_r(IndexOnDisk, MsgStatus, Q3), ram_index_count = RamIndexCount + one_if(not IndexOnDisk) }; - false -> {MsgStatus1, State1} = read_msg(MsgStatus, State), - State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4) } + false -> {MsgStatus1, State1 = #vqstate { q4 = Q4a }} = + read_msg(MsgStatus, State), + State1 #vqstate { q4 = queue:in_r(MsgStatus1, Q4a) } end; in_r(MsgStatus, State = #vqstate { q4 = Q4 }) -> State #vqstate { q4 = queue:in_r(MsgStatus, Q4) }. -- cgit v1.2.1 From 78b92f44d5ce7b6715d48b27bf6b38749023eba3 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 26 Jun 2011 16:28:23 +0100 Subject: Add test --- src/rabbit_tests.erl | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index f5492cdc..624d41fb 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2136,6 +2136,7 @@ test_variable_queue() -> fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1, fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1, fun test_dropwhile/1, + fun test_dropwhile_varying_ram_duration/1, fun test_variable_queue_ack_limiting/1]], passed. @@ -2199,6 +2200,21 @@ test_dropwhile(VQ0) -> VQ4. +test_dropwhile_varying_ram_duration(VQ0) -> + VQ1 = rabbit_variable_queue:publish( + rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{}, <<>>), + #message_properties{}, self(), VQ0), + VQ2 = rabbit_variable_queue:set_ram_duration_target(0, VQ1), + VQ3 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ2), + VQ4 = rabbit_variable_queue:publish( + rabbit_basic:message( + rabbit_misc:r(<<>>, exchange, <<>>), + <<>>, #'P_basic'{}, <<>>), + #message_properties{}, self(), VQ3), + rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ4). 
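%% What the new case is after (sketch): setting the ram duration target to 0
%% pushes the first message's content out of RAM, so the later dropwhile has
%% to cope with a msg = undefined entry via the in_r/2 clause added in the
%% bug fix above; a follow-up commit in this series then adjusts the test so
%% that both branches of the new case queue:is_empty(Q4) are exercised. The
%% case runs under the usual harness, which supplies a fresh variable queue
%% and tears it down afterwards:
%%
%%     passed = with_fresh_variable_queue(
%%                fun test_dropwhile_varying_ram_duration/1).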
+ test_variable_queue_dynamic_duration_change(VQ0) -> SegmentSize = rabbit_queue_index:next_segment_boundary(0), -- cgit v1.2.1 From 8b5c2df689442652288bbfd865554e8f4f384f58 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 26 Jun 2011 16:38:51 +0100 Subject: Refactoring --- src/rabbit_tests.erl | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 624d41fb..c6f88980 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2087,8 +2087,11 @@ variable_queue_init(Q, Recover) -> Q, Recover, fun nop/2, fun nop/2, fun nop/2, fun nop/1). variable_queue_publish(IsPersistent, Count, VQ) -> + variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ). + +variable_queue_publish(IsPersistent, Count, PropFun, VQ) -> lists:foldl( - fun (_N, VQN) -> + fun (N, VQN) -> rabbit_variable_queue:publish( rabbit_basic:message( rabbit_misc:r(<<>>, exchange, <<>>), @@ -2096,7 +2099,7 @@ variable_queue_publish(IsPersistent, Count, VQ) -> true -> 2; false -> 1 end}, <<>>), - #message_properties{}, self(), VQN) + PropFun(N, #message_properties{}), self(), VQN) end, VQ, lists:seq(1, Count)). variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) -> @@ -2173,14 +2176,9 @@ test_dropwhile(VQ0) -> Count = 10, %% add messages with sequential expiry - VQ1 = lists:foldl( - fun (N, VQN) -> - rabbit_variable_queue:publish( - rabbit_basic:message( - rabbit_misc:r(<<>>, exchange, <<>>), - <<>>, #'P_basic'{}, <<>>), - #message_properties{expiry = N}, self(), VQN) - end, VQ0, lists:seq(1, Count)), + VQ1 = variable_queue_publish( + false, Count, + fun (N, Props) -> Props#message_properties{expiry = N} end, VQ0), %% drop the first 5 messages VQ2 = rabbit_variable_queue:dropwhile( @@ -2201,18 +2199,10 @@ test_dropwhile(VQ0) -> VQ4. test_dropwhile_varying_ram_duration(VQ0) -> - VQ1 = rabbit_variable_queue:publish( - rabbit_basic:message( - rabbit_misc:r(<<>>, exchange, <<>>), - <<>>, #'P_basic'{}, <<>>), - #message_properties{}, self(), VQ0), + VQ1 = variable_queue_publish(false, 1, VQ0), VQ2 = rabbit_variable_queue:set_ram_duration_target(0, VQ1), VQ3 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ2), - VQ4 = rabbit_variable_queue:publish( - rabbit_basic:message( - rabbit_misc:r(<<>>, exchange, <<>>), - <<>>, #'P_basic'{}, <<>>), - #message_properties{}, self(), VQ3), + VQ4 = variable_queue_publish(false, 1, VQ3), rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ4). test_variable_queue_dynamic_duration_change(VQ0) -> -- cgit v1.2.1 From db546098abcac143c531715cd4a6421f33184a12 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 26 Jun 2011 17:50:58 +0100 Subject: Ensure we hit both branches of the newly added case queue:is_empty(Q4) of --- src/rabbit_tests.erl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index c6f88980..8f655846 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2199,11 +2199,13 @@ test_dropwhile(VQ0) -> VQ4. test_dropwhile_varying_ram_duration(VQ0) -> - VQ1 = variable_queue_publish(false, 1, VQ0), + VQ1 = variable_queue_publish(false, 2, VQ0), VQ2 = rabbit_variable_queue:set_ram_duration_target(0, VQ1), VQ3 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ2), VQ4 = variable_queue_publish(false, 1, VQ3), - rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ4). 
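%% The variable_queue_publish/4 refactoring above is what keeps variations
%% like this cheap to write: per-message properties are derived from the
%% publish index rather than built in a hand-rolled fold. test_dropwhile,
%% for example, now assigns each message an increasing expiry like so
%% (taken from the patch):
%%
%%     VQ1 = variable_queue_publish(
%%             false, Count,
%%             fun (N, Props) -> Props#message_properties{expiry = N} end,
%%             VQ0).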
+ VQ5 = rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ4), + {VQ6, [_AckTag]} = variable_queue_fetch(1, false, false, 3, VQ5), + rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ6). test_variable_queue_dynamic_duration_change(VQ0) -> SegmentSize = rabbit_queue_index:next_segment_boundary(0), -- cgit v1.2.1 From 68729ce7d03eae11e669dd67bf1a52a143207bbe Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 26 Jun 2011 17:57:57 +0100 Subject: simplify --- src/rabbit_tests.erl | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 8f655846..3ee71a6d 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -2199,13 +2199,12 @@ test_dropwhile(VQ0) -> VQ4. test_dropwhile_varying_ram_duration(VQ0) -> - VQ1 = variable_queue_publish(false, 2, VQ0), + VQ1 = variable_queue_publish(false, 1, VQ0), VQ2 = rabbit_variable_queue:set_ram_duration_target(0, VQ1), - VQ3 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ2), - VQ4 = variable_queue_publish(false, 1, VQ3), - VQ5 = rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ4), - {VQ6, [_AckTag]} = variable_queue_fetch(1, false, false, 3, VQ5), - rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ6). + VQ3 = rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ2), + VQ4 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ3), + VQ5 = variable_queue_publish(false, 1, VQ4), + rabbit_variable_queue:dropwhile(fun(_) -> false end, VQ5). test_variable_queue_dynamic_duration_change(VQ0) -> SegmentSize = rabbit_queue_index:next_segment_boundary(0), -- cgit v1.2.1 From 14f3091f8ff207620a12595ca003edbf8b85497e Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sun, 26 Jun 2011 18:28:43 +0100 Subject: essentially cosmetic tweak the result of terminate_and_shutdown is not needed, but it looks odd to throw away State1 in one branch. --- src/rabbit_amqqueue_process.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index c1fa048d..e388ccf2 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -220,7 +220,7 @@ terminate_shutdown(Fun, State) -> State1 = #q{backing_queue = BQ, backing_queue_state = BQS} = stop_sync_timer(stop_rate_timer(State)), case BQS of - undefined -> State; + undefined -> State1; _ -> ok = rabbit_memory_monitor:deregister(self()), BQS1 = lists:foldl( fun (#cr{txn = none}, BQSN) -> -- cgit v1.2.1 From 0835a8e307cc73a2bd9e789cb6d55d1c58b734ed Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Sun, 26 Jun 2011 20:23:39 +0100 Subject: Add missing specs; move some functions to more appropriate places --- src/rabbit_mirror_queue_coordinator.erl | 10 +++++ src/rabbit_mirror_queue_master.erl | 76 +++++++++++++++++++++++---------- 2 files changed, 63 insertions(+), 23 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index 2727c1d0..57f6ca8b 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -37,6 +37,16 @@ -define(ONE_SECOND, 1000). +-ifdef(use_specs). + +-spec(start_link/3 :: (rabbit_types:amqqueue(), pid() | 'undefined', + rabbit_mirror_queue_master:death_fun()) -> + rabbit_types:ok_pid_or_error()). +-spec(get_gm/1 :: (pid()) -> pid()). +-spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok'). + +-endif. 
+ %%---------------------------------------------------------------------------- %% %% Mirror Queues diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 463b8cfb..b090ebe8 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -43,6 +43,28 @@ known_senders }). +-ifdef(use_specs). + +-export_type([death_fun/0]). + +-type(death_fun() :: fun ((pid()) -> 'ok')). +-type(master_state() :: #state { gm :: pid(), + coordinator :: pid(), + backing_queue :: atom(), + backing_queue_state :: any(), + set_delivered :: non_neg_integer(), + seen_status :: dict(), + confirmed :: [rabbit_guid:guid()], + ack_msg_id :: dict(), + known_senders :: set() + }). + +-spec(promote_backing_queue_state/6 :: + (pid(), atom(), any(), pid(), dict(), [pid()]) -> master_state()). +-spec(sender_death_fun/0 :: () -> death_fun()). + +-endif. + %% For general documentation of HA design, see %% rabbit_mirror_queue_coordinator @@ -59,18 +81,6 @@ stop() -> %% Same as start/1. exit({not_valid_for_generic_backing_queue, ?MODULE}). -sender_death_fun() -> - Self = self(), - fun (DeadPid) -> - rabbit_amqqueue:run_backing_queue_async( - Self, ?MODULE, - fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> - ok = gm:broadcast(GM, {sender_death, DeadPid}), - KS1 = sets:del_element(DeadPid, KS), - State #state { known_senders = KS1 } - end) - end. - init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover, AsyncCallback, SyncCallback) -> {ok, CPid} = rabbit_mirror_queue_coordinator:start_link( @@ -95,17 +105,6 @@ init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover, ack_msg_id = dict:new(), known_senders = sets:new() }. -promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, KS) -> - #state { gm = GM, - coordinator = CPid, - backing_queue = BQ, - backing_queue_state = BQS, - set_delivered = BQ:len(BQS), - seen_status = SeenStatus, - confirmed = [], - ack_msg_id = dict:new(), - known_senders = sets:from_list(KS) }. - terminate({shutdown, dropped} = Reason, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> %% Backing queue termination - this node has been explicitly @@ -365,6 +364,37 @@ discard(Msg = #basic_message { id = MsgId }, ChPid, State end. +%% --------------------------------------------------------------------------- +%% Other exported functions +%% --------------------------------------------------------------------------- + +promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, KS) -> + #state { gm = GM, + coordinator = CPid, + backing_queue = BQ, + backing_queue_state = BQS, + set_delivered = BQ:len(BQS), + seen_status = SeenStatus, + confirmed = [], + ack_msg_id = dict:new(), + known_senders = sets:from_list(KS) }. + +sender_death_fun() -> + Self = self(), + fun (DeadPid) -> + rabbit_amqqueue:run_backing_queue_async( + Self, ?MODULE, + fun (?MODULE, State = #state { gm = GM, known_senders = KS }) -> + ok = gm:broadcast(GM, {sender_death, DeadPid}), + KS1 = sets:del_element(DeadPid, KS), + State #state { known_senders = KS1 } + end) + end. 
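%% The promote_backing_queue_state/6 spec above belongs to the function that
%% is moved below into the "Other exported functions" section. A sketch of
%% the call shape it documents (the variable names, and the choice of
%% rabbit_variable_queue as the underlying backing queue module, are
%% illustrative assumptions; the real call site is the slave's promotion
%% code):
%%
%%     MasterState =
%%         rabbit_mirror_queue_master:promote_backing_queue_state(
%%           CPid, rabbit_variable_queue, BQS, GM, SeenStatus,
%%           KnownSenderPids).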
+ +%% --------------------------------------------------------------------------- +%% Helpers +%% --------------------------------------------------------------------------- + maybe_store_acktag(undefined, _MsgId, AM) -> AM; maybe_store_acktag(AckTag, MsgId, AM) -> -- cgit v1.2.1 From c5ca012bd7f86eb0a8f3232267710f36565cfe87 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 27 Jun 2011 11:44:33 +0100 Subject: Add further backing queue methods --- src/rabbit_backing_queue_qc.erl | 187 ++++++++++++++++++++++++++++------------ 1 file changed, 133 insertions(+), 54 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index f5cd7f95..83e7da5e 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -23,26 +23,50 @@ -define(BQMOD, rabbit_variable_queue). +-define(RECORD_INDEX(Key, Record), + erlang:element(2, proplists:lookup(Key, lists:zip( + record_info(fields, Record), lists:seq(2, record_info(size, Record)))))). + -export([initial_state/0, command/1, precondition/2, postcondition/3, next_state/3]). -export([prop_backing_queue_test/0]). - -record(state, {bqstate, - messages, - acks}). + messages, %% queue of {msg_props, basic_msg} + acks, %% list of {acktag, {message_props, basic_msg}} + confirms}). %% set of msgid +%% Initialise model initial_state() -> - VQ = qc_variable_queue_init(qc_test_queue()), - #state{bqstate=VQ, messages = queue:new(), acks = []}. + #state{bqstate = qc_variable_queue_init(qc_test_queue()), + messages = queue:new(), + acks = [], + confirms = gb_sets:new()}. + +%% Property prop_backing_queue_test() -> ?FORALL(Cmds, commands(?MODULE, initial_state()), begin - {_H, #state{bqstate = VQ}, Res} = run_commands(?MODULE, Cmds), - rabbit_variable_queue:delete_and_terminate(shutdown, VQ), + {ok, FileSizeLimit} = + application:get_env(rabbit, msg_store_file_size_limit), + application:set_env(rabbit, msg_store_file_size_limit, 512, + infinity), + {ok, MaxJournal} = + application:get_env(rabbit, queue_index_max_journal_entries), + application:set_env(rabbit, queue_index_max_journal_entries, 128, + infinity), + + {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds), + + application:set_env(rabbit, msg_store_file_size_limit, + FileSizeLimit, infinity), + application:set_env(rabbit, queue_index_max_journal_entries, + MaxJournal, infinity), + + rabbit_variable_queue:delete_and_terminate(shutdown, BQ), ?WHENFAIL( io:format("Result: ~p~n", [Res]), aggregate(command_names(Cmds), Res =:= ok)) @@ -50,32 +74,46 @@ prop_backing_queue_test() -> %% Commands -command(#state{bqstate = VQ} = S) -> +command(S) -> ?SIZED(Size, - frequency([{Size, qc_publish(S)}, - {Size, qc_fetch(S)}, - {Size, qc_ack(S)}, - {Size, qc_requeue(S)}, - {Size, qc_ram(S)}, - {1, {call, ?BQMOD, purge, [VQ]}}])). - -qc_publish(#state{bqstate = VQ}) -> + frequency([{Size, qc_publish(S)}, + {Size, qc_fetch(S)}, + {Size, qc_ack(S)}, + {Size, qc_requeue(S)}, + {Size, qc_ram(S)}, + {Size, qc_drain_confirmed(S)}, + {Size, qc_dropwhile(S)}, + {1, qc_purge(S)}])). + +qc_publish(#state{bqstate = BQ}) -> {call, ?BQMOD, publish, - [qc_message(), #message_properties{}, self(), VQ]}. + [qc_message(), + #message_properties{needs_confirming = frequency([{1, true}, + {20, false}]), + expiry = choose(0, 10)}, + self(), BQ]}. -qc_fetch(#state{bqstate = VQ}) -> - {call, ?BQMOD, fetch, [boolean(), VQ]}. +qc_fetch(#state{bqstate = BQ}) -> + {call, ?BQMOD, fetch, [boolean(), BQ]}. 
-qc_ack(#state{bqstate = VQ, acks = Acks}) -> - {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), VQ]}. +qc_ack(#state{bqstate = BQ, acks = Acks}) -> + {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}. -qc_requeue(#state{bqstate = VQ, acks = Acks}) -> +qc_requeue(#state{bqstate = BQ, acks = Acks}) -> {call, ?BQMOD, requeue, - [rand_choice(proplists:get_keys(Acks)), fun(MsgOpts) -> MsgOpts end, VQ]}. + [rand_choice(proplists:get_keys(Acks)), fun(MsgOpts) -> MsgOpts end, BQ]}. + +qc_ram(#state{bqstate = BQ}) -> + {call, ?BQMOD, set_ram_duration_target, [oneof([0, infinity]), BQ]}. + +qc_drain_confirmed(#state{bqstate = BQ}) -> + {call, ?BQMOD, drain_confirmed, [BQ]}. -qc_ram(#state{bqstate = VQ}) -> - {call, ?BQMOD, set_ram_duration_target, - [oneof([0, infinity]), VQ]}. +qc_dropwhile(#state{bqstate = BQ}) -> + {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}. + +qc_purge(#state{bqstate = BQ}) -> + {call, ?BQMOD, purge, [BQ]}. %% Preconditions @@ -85,68 +123,92 @@ precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg}) precondition(_S, {call, ?BQMOD, _Fun, _Arg}) -> true. -%% Next state - -next_state(S, VQ, {call, ?BQMOD, publish, [Msg, _MsgProps, _Pid, _VQ]}) -> - #state{messages = Messages} = S, - S#state{bqstate = VQ, messages = queue:in(Msg, Messages)}; - -next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _VQ]}) -> +%% Model updates + +next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Pid, _BQ]}) -> + #state{messages = Messages, confirms = Confirms} = S, + MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]}, + NeedsConfirm = + {call, erlang, element, + [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]}, + Confirms1 = case eval(NeedsConfirm) of + true -> gb_sets:add(MsgId, Confirms); + _ -> Confirms + end, + S#state{bqstate = BQ, + messages = queue:in({MsgProps, Msg}, Messages), + confirms = Confirms1}; + +next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> #state{messages = Messages, acks = Acks} = S, ResultInfo = {call, erlang, element, [1, Res]}, - VQ1 = {call, erlang, element, [2, Res]}, + BQ1 = {call, erlang, element, [2, Res]}, AckTag = {call, erlang, element, [3, ResultInfo]}, - S1 = S#state{bqstate = VQ1}, + S1 = S#state{bqstate = BQ1}, case queue:out(Messages) of {empty, _M2} -> S1; - {{value, Msg}, M2} -> + {{value, MsgProp_Msg}, M2} -> S2 = S1#state{messages = M2}, case AckReq of - true -> S2#state{acks = Acks ++ [{AckTag, Msg}]}; + true -> S2#state{acks = Acks ++ [{AckTag, MsgProp_Msg}]}; false -> S2 end end; -next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _VQ]}) -> +next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) -> #state{acks = AcksState} = S, - VQ1 = {call, erlang, element, [2, Res]}, - S#state{bqstate = VQ1, + BQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = BQ1, acks = propvals_by_keys(AcksState, AcksArg)}; next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> #state{messages = Messages, acks = AcksState} = S, - VQ1 = {call, erlang, element, [2, Res]}, - RequeueMsgs = [proplists:get_value(Key, AcksState) || Key <- AcksArg ], - S#state{bqstate = VQ1, + BQ1 = {call, erlang, element, [2, Res]}, + RequeueMsgs = [proplists:get_value(Key, AcksState) || Key <- AcksArg], + S#state{bqstate = BQ1, messages = queue:join(Messages, queue:from_list(RequeueMsgs)), acks = propvals_by_keys(AcksState, AcksArg)}; -next_state(S, VQ, {call, ?BQMOD, set_ram_duration_target, _A}) -> - S#state{bqstate = VQ}; +next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) -> + 
S#state{bqstate = BQ}; -next_state(S, Res, {call, ?BQMOD, purge, _A}) -> - VQ1 = {call, erlang, element, [2, Res]}, - S#state{bqstate = VQ1, messages = queue:new()}. +next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) -> + BQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = BQ1}; + +next_state(S, BQ1, {call, ?BQMOD, dropwhile, _Args}) -> + #state{messages = Messages} = S, + S#state{bqstate = BQ1, messages = drop_messages(Messages)}; + +next_state(S, Res, {call, ?BQMOD, purge, _Args}) -> + BQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = BQ1, messages = queue:new()}. %% Postconditions postcondition(#state{messages = Messages}, {call, ?BQMOD, fetch, _Args}, Res) -> case Res of - {{MsgFetched, _IsDelivered, _AckTag, _Remaining_Len}, _VQ} -> - MsgFetched =:= queue:head(Messages); - {empty, _VQ} -> + {{MsgFetched, _IsDelivered, _AckTag, _Remaining_Len}, _BQ} -> + {_MsgProps, Msg} = queue:head(Messages), + MsgFetched =:= Msg; + {empty, _BQ} -> queue:len(Messages) =:= 0 end; postcondition(#state{messages = Messages}, {call, ?BQMOD, purge, _Args}, Res) -> - {PurgeCount, _VQ} = Res, + {PurgeCount, _BQ} = Res, queue:len(Messages) =:= PurgeCount; -postcondition(#state{bqstate = VQ, +postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) -> + #state{confirms = Confirms} = S, + {ReportedConfirmed, _BQ} = Res, + lists:all(fun (M) -> lists:member(M, Confirms) end, ReportedConfirmed); + +postcondition(#state{bqstate = BQ, messages = Messages}, {call, ?BQMOD, _Fun, _Args}, _Res) -> - ?BQMOD:len(VQ) =:= queue:len(Messages). + ?BQMOD:len(BQ) =:= queue:len(Messages). %% Helpers @@ -201,3 +263,20 @@ rand_choice(List) -> [] -> []; _ -> [lists:nth(random:uniform(length(List)), List)] end. + +dropfun(Props) -> + Expiry = eval({call, erlang, element, + [?RECORD_INDEX(expiry, message_properties), Props]}), + Expiry =/= 0. + +drop_messages(Messages) -> + case queue:out(Messages) of + {empty, _} -> + Messages; + {{value, MsgProps_Msg}, M2} -> + MsgProps = {call, erlang, element, [1, MsgProps_Msg]}, + case dropfun(MsgProps) of + true -> drop_messages(M2); + false -> Messages + end + end. -- cgit v1.2.1 From 34894f392d2f9a20c0e0f8c257d09a9eb2990835 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 27 Jun 2011 16:54:16 +0100 Subject: Remove tonyg and md5 checksums, since the Macports guys already have done and we don't want to reinstate them. 
--- packaging/macports/Portfile.in | 4 +--- packaging/macports/make-checksums.sh | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in index 809f518b..4a866305 100644 --- a/packaging/macports/Portfile.in +++ b/packaging/macports/Portfile.in @@ -5,7 +5,7 @@ PortSystem 1.0 name rabbitmq-server version @VERSION@ categories net -maintainers paperplanes.de:meyer rabbitmq.com:tonyg openmaintainer +maintainers paperplanes.de:meyer openmaintainer platforms darwin supported_archs noarch @@ -24,11 +24,9 @@ distfiles ${name}-${version}${extract.suffix} \ checksums \ ${name}-${version}${extract.suffix} \ - md5 @md5-src@ \ sha1 @sha1-src@ \ rmd160 @rmd160-src@ \ ${name}-generic-unix-${version}${extract.suffix} \ - md5 @md5-bin@ \ sha1 @sha1-bin@ \ rmd160 @rmd160-bin@ diff --git a/packaging/macports/make-checksums.sh b/packaging/macports/make-checksums.sh index 11424dfc..891de6ba 100755 --- a/packaging/macports/make-checksums.sh +++ b/packaging/macports/make-checksums.sh @@ -6,7 +6,7 @@ for type in src bin do tarball_var=tarball_${type} tarball=${!tarball_var} - for algo in md5 sha1 rmd160 + for algo in sha1 rmd160 do checksum=$(openssl $algo ${tarball} | awk '{print $NF}') echo "s|@$algo-$type@|$checksum|g" -- cgit v1.2.1 From a0d8596c230e8ebf3e177eb0bd71b6113076d170 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 27 Jun 2011 17:32:57 +0100 Subject: Scratch space for exchanges, and rabbit_exchange:update/2. --- include/rabbit.hrl | 3 ++- src/rabbit_exchange.erl | 23 ++++++++++++++++++++++- src/rabbit_upgrade_functions.erl | 14 ++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/include/rabbit.hrl b/include/rabbit.hrl index 00b7e6e9..9c594b05 100644 --- a/include/rabbit.hrl +++ b/include/rabbit.hrl @@ -42,7 +42,8 @@ -record(resource, {virtual_host, kind, name}). --record(exchange, {name, type, durable, auto_delete, internal, arguments}). +-record(exchange, {name, type, durable, auto_delete, internal, arguments, + scratch}). -record(exchange_serial, {name, next}). -record(amqqueue, {name, durable, auto_delete, exclusive_owner = none, diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index cab1b99f..0c335463 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -20,7 +20,7 @@ -export([recover/0, callback/3, declare/6, assert_equivalence/6, assert_args_equivalence/2, check_type/1, - lookup/1, lookup_or_die/1, list/1, + lookup/1, lookup_or_die/1, list/1, update/2, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). %% these must be run inside a mnesia tx @@ -199,6 +199,27 @@ list(VHostPath) -> rabbit_exchange, #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'}). +update(Name, Fun) -> + case mnesia:transaction( + fun() -> + case mnesia:read(rabbit_exchange, Name, write) of + [X = #exchange{durable = Durable}] -> + ok = mnesia:write(rabbit_exchange, Fun(X), write), + case Durable of + true -> + ok = mnesia:write(rabbit_durable_exchange, + Fun(X), write); + _ -> + ok + end; + [] -> + ok + end + end) of + {atomic, ok} -> ok; + {aborted, Reason} -> {error, Reason} + end. + info_keys() -> ?INFO_KEYS. map(VHostPath, F) -> diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index 0f7a7810..acf45bf3 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -32,6 +32,7 @@ -rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}). 
-rabbit_upgrade({ha_mirrors, mnesia, []}). -rabbit_upgrade({gm, mnesia, []}). +-rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}). %% ------------------------------------------------------------------- @@ -49,6 +50,7 @@ -spec(user_admin_to_tags/0 :: () -> 'ok'). -spec(ha_mirrors/0 :: () -> 'ok'). -spec(gm/0 :: () -> 'ok'). +-spec(exchange_scratch/0 :: () -> 'ok'). -endif. @@ -155,6 +157,18 @@ gm() -> create(gm_group, [{record_name, gm_group}, {attributes, [name, version, members]}]). +exchange_scratch() -> + ok = exchange_scratch(rabbit_exchange), + ok = exchange_scratch(rabbit_durable_exchange). + +exchange_scratch(Table) -> + transform( + Table, + fun ({exchange, Name, Type, Dur, AutoDel, Int, Args}) -> + {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined} + end, + [name, type, durable, auto_delete, internal, arguments, scratch]). + %%-------------------------------------------------------------------- transform(TableName, Fun, FieldList) -> -- cgit v1.2.1 From 4c685a35e7020e8e66584759a18e2da6e4c39969 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 27 Jun 2011 18:21:06 +0100 Subject: tiny refactor: better function names --- src/rabbit_channel.erl | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 51d844c9..b8501c68 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -290,7 +290,7 @@ handle_cast({deliver, ConsumerTag, AckRequired, true -> deliver; false -> deliver_no_ack end, State), - maybe_incr_redeliver(Redelivered, QPid, State), + maybe_incr_redeliver_stats(Redelivered, QPid, State), rabbit_trace:tap_trace_out(Msg, TraceState), noreply(State1#ch{next_tag = DeliveryTag + 1}); @@ -684,12 +684,11 @@ handle_method(#'basic.get'{queue = QueueNameBin, State1 = lock_message(not(NoAck), ack_record(DeliveryTag, none, Msg), State), - maybe_incr_stats([{QPid, 1}], - case NoAck of - true -> get_no_ack; - false -> get - end, State), - maybe_incr_redeliver(Redelivered, QPid, State), + maybe_incr_stats([{QPid, 1}], case NoAck of + true -> get_no_ack; + false -> get + end, State), + maybe_incr_redeliver_stats(Redelivered, QPid, State), rabbit_trace:tap_trace_out(Msg, TraceState), ok = rabbit_writer:send_command( WriterPid, @@ -1454,9 +1453,9 @@ i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) -> i(Item, _) -> throw({bad_argument, Item}). -maybe_incr_redeliver(true, QPid, State) -> +maybe_incr_redeliver_stats(true, QPid, State) -> maybe_incr_stats([{QPid, 1}], redeliver, State); -maybe_incr_redeliver(_, _, _) -> +maybe_incr_redeliver_stats(_, _, _) -> ok. maybe_incr_stats(QXIncs, Measure, #ch{stats_timer = StatsTimer}) -> -- cgit v1.2.1 From a41f6d5d1f7426f6049be8eccc7d04e3e6a633d5 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 13:34:01 +0100 Subject: init:stop knows better than us. It makes sure applications are taken down in the right order (which we were not doing). --- src/rabbit.erl | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 100cacb0..5f727a82 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -211,11 +211,7 @@ stop() -> ok = rabbit_misc:stop_applications(?APPS). stop_and_halt() -> - try - stop() - after - init:stop() - end, + init:stop(), ok. 
status() -> -- cgit v1.2.1 From 33de4c3bfb553fd704c2908bb464d26cba1bb89f Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 28 Jun 2011 14:00:29 +0100 Subject: Add conditional compilation and makefile infrastructure --- Makefile | 19 ++++++++++++++++++- quickcheck | 32 ++++++++++++++++++++++++++++++++ src/rabbit_backing_queue_qc.erl | 3 +++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100755 quickcheck diff --git a/Makefile b/Makefile index d8ef058e..ad07f1ef 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,8 @@ MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml)) WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml) USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML))) +QC_MODULES := rabbit_backing_queue_qc +QC_TRIALS ?= 100 ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes) PYTHON=python @@ -45,8 +47,14 @@ ifndef USE_SPECS USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,8,4]), halt().') endif +ifndef USE_PROPER_QC +# PropEr needs to be installed for property checking +# http://proper.softlab.ntua.gr/ +USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().') +endif + #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests -ERLC_OPTS=-I $(INCLUDE_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(if $(filter true,$(USE_SPECS)),-Duse_specs) +ERLC_OPTS=-I $(INCLUDE_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc) VERSION=0.0.0 TARBALL_NAME=rabbitmq-server-$(VERSION) @@ -69,6 +77,10 @@ define usage_dep $(call usage_xml_to_erl, $(1)): $(1) $(DOCS_DIR)/usage.xsl endef +define boolean_macro +$(if $(filter true,$(1)),-D$(2)) +endef + ifneq "$(SBIN_DIR)" "" ifneq "$(TARGET_DIR)" "" SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR)) @@ -165,6 +177,9 @@ run-tests: all OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \ echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null +run-qc: all + $(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS)) + start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ RABBITMQ_NODE_ONLY=true \ @@ -314,3 +329,5 @@ ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" "" -include $(DEPS_FILE) endif +.SUFFIXES: +.SUFFIXES: .erl .hrl .beam diff --git a/quickcheck b/quickcheck new file mode 100755 index 00000000..24edae70 --- /dev/null +++ b/quickcheck @@ -0,0 +1,32 @@ +#!/usr/bin/env escript +%% -*- erlang -*- +%%! -sname quickcheck +-mode(compile). 
+ +%% A helper to test quickcheck properties on a running broker +%% NodeStr is a local broker node name +%% ModStr is the module containing quickcheck properties +%% The number of trials is optional +main([NodeStr, ModStr | TrialsStr]) -> + Node = list_to_atom(NodeStr ++ "@" ++ net_adm:localhost()), + Mod = list_to_atom(ModStr), + Trials = lists:map(fun erlang:list_to_integer/1, TrialsStr), + case rpc:call(Node, code, ensure_loaded, [proper]) of + {module, proper} -> + case rpc:call(Node, proper, module, [Mod] ++ Trials) of + [] -> ok; + _ -> quit(1) + end; + _ -> + io:format("PropEr module not present on node ~p.~n", [Node]), + quit(2) + end; +main([]) -> + io:format("This script requires a node name and a module.~n"). + +quit(Status) -> + case os:type() of + {unix, _} -> halt(Status); + {win32, _} -> init:stop(Status) + end. + diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index 83e7da5e..c0938be3 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -15,6 +15,7 @@ %% -module(rabbit_backing_queue_qc). +-ifdef(use_proper_qc). -include("rabbit.hrl"). -include("rabbit_framing.hrl"). -include_lib("proper/include/proper.hrl"). @@ -280,3 +281,5 @@ drop_messages(Messages) -> false -> Messages end end. + +-endif. -- cgit v1.2.1 From f85ce9974466f0e2a30c0455abd027891e13acc1 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 28 Jun 2011 14:03:45 +0100 Subject: Complicate Makefile processing --- Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index ad07f1ef..04f74fda 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ run-tests: all echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null run-qc: all - $(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS)) + $(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS)) start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ @@ -329,5 +329,3 @@ ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" "" -include $(DEPS_FILE) endif -.SUFFIXES: -.SUFFIXES: .erl .hrl .beam -- cgit v1.2.1 From 75a04e11e0749f8268ae9f66e7f8a09d2db7879b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 15:32:31 +0100 Subject: Build graph; prune graph; traverse graph; obey graph. --- src/rabbit.erl | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 5f727a82..ce120570 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -201,14 +201,14 @@ prepare() -> start() -> try ok = prepare(), - ok = rabbit_misc:start_applications(?APPS) + ok = rabbit_misc:start_applications(application_load_order()) after %%give the error loggers some time to catch up timer:sleep(100) end. stop() -> - ok = rabbit_misc:stop_applications(?APPS). + ok = rabbit_misc:stop_applications(application_load_order()). stop_and_halt() -> init:stop(), @@ -386,6 +386,24 @@ config_files() -> %%--------------------------------------------------------------------------- +application_load_order() -> + {ok, G} = rabbit_misc:build_acyclic_graph( + fun application_graph_vertex/2, fun application_graph_edge/2, + [{App, Deps} || + {App, _Desc, _Vsn} <- application:loaded_applications(), + {ok, Deps} <- [application:get_key(App, applications)]]), + true = digraph:del_vertices( + G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), + digraph_utils:topsort(G). + +application_graph_vertex(App, _Deps) -> + [{App, App}]. 
+ +application_graph_edge(App, Deps) -> + [{Dep, App} || Dep <- Deps]. + +%%--------------------------------------------------------------------------- + print_banner() -> {ok, Product} = application:get_key(id), {ok, Version} = application:get_key(vsn), -- cgit v1.2.1 From aa50d37b089a47bd75c9bc27b0ccf68baa456bc6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 15:36:22 +0100 Subject: Delete graph after use --- src/rabbit.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index ce120570..af9cb472 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -394,7 +394,9 @@ application_load_order() -> {ok, Deps} <- [application:get_key(App, applications)]]), true = digraph:del_vertices( G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), - digraph_utils:topsort(G). + Result = digraph_utils:topsort(G), + true = digraph:delete(G), + Result. application_graph_vertex(App, _Deps) -> [{App, App}]. -- cgit v1.2.1 From eab449af808d625237a50b304a6c1ced68476770 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 15:41:23 +0100 Subject: Finally, just to be really safe, avoid init:stop as far as we sensibly can on stop_and_halt as we know that does the wrong thing should applications have been manually stopped and restarted --- src/rabbit.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index af9cb472..16b8ac19 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -211,7 +211,11 @@ stop() -> ok = rabbit_misc:stop_applications(application_load_order()). stop_and_halt() -> - init:stop(), + try + stop() + after + init:stop() + end, ok. status() -> -- cgit v1.2.1 From 8fb7ce40ecc2f7e7381c327237fbed27dd9546e2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 15:42:24 +0100 Subject: cosmetic --- src/rabbit.erl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 16b8ac19..c8a60bd8 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -402,11 +402,9 @@ application_load_order() -> true = digraph:delete(G), Result. -application_graph_vertex(App, _Deps) -> - [{App, App}]. +application_graph_vertex(App, _Deps) -> [{App, App}]. -application_graph_edge(App, Deps) -> - [{Dep, App} || Dep <- Deps]. +application_graph_edge(App, Deps) -> [{Dep, App} || Dep <- Deps]. %%--------------------------------------------------------------------------- -- cgit v1.2.1 From 65197bc696c4fee4ff11a05c1074284f0b0ea9e6 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 15:57:24 +0100 Subject: All keys are optional. 
We must not omit a vertex just because it has not any dependencies --- src/rabbit.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index c8a60bd8..b523a26d 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -395,7 +395,13 @@ application_load_order() -> fun application_graph_vertex/2, fun application_graph_edge/2, [{App, Deps} || {App, _Desc, _Vsn} <- application:loaded_applications(), - {ok, Deps} <- [application:get_key(App, applications)]]), + begin + Deps = case application:get_key(App, applications) of + undefined -> []; + {ok, Lst} -> Lst + end, + true + end]), true = digraph:del_vertices( G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), Result = digraph_utils:topsort(G), -- cgit v1.2.1 From e7ec913d9c76db773b5a548e8dac3dbc35cbeb70 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 16:01:47 +0100 Subject: Avoid pointless begin-end block. Duh. --- src/rabbit.erl | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index b523a26d..0d163944 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -393,15 +393,11 @@ config_files() -> application_load_order() -> {ok, G} = rabbit_misc:build_acyclic_graph( fun application_graph_vertex/2, fun application_graph_edge/2, - [{App, Deps} || - {App, _Desc, _Vsn} <- application:loaded_applications(), - begin - Deps = case application:get_key(App, applications) of - undefined -> []; - {ok, Lst} -> Lst - end, - true - end]), + [{App, case application:get_key(App, applications) of + undefined -> []; + {ok, Lst} -> Lst + end} || + {App, _Desc, _Vsn} <- application:loaded_applications()]), true = digraph:del_vertices( G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), Result = digraph_utils:topsort(G), -- cgit v1.2.1 From 35b76af72622664b6d59f41e34811934e181ebc2 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 16:37:26 +0100 Subject: There's a possibility that rabbit has to be able to be started without being booted. Thus we need to be able to load the transitive closure of rabbit's dependencies on demand. This is especially needed for the tests. --- src/rabbit.erl | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/rabbit.erl b/src/rabbit.erl index 0d163944..5ec0611a 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -390,7 +390,32 @@ config_files() -> %%--------------------------------------------------------------------------- +load_applications() -> + load_applications(queue:from_list(?APPS), sets:new()). + +load_applications(Worklist, Loaded) -> + case queue:out(Worklist) of + {empty, _WorkList} -> + ok; + {{value, App}, Worklist1} -> + case sets:is_element(App, Loaded) of + true -> load_applications(Worklist1, Loaded); + false -> case application:load(App) of + ok -> ok; + {error, {already_loaded, App}} -> ok; + Error -> throw(Error) + end, + load_applications( + case application:get_key(App, applications) of + undefined -> Worklist1; + {ok, Lst} -> queue:join(Worklist1, + queue:from_list(Lst)) + end, sets:add_element(App, Loaded)) + end + end. 
+ application_load_order() -> + ok = load_applications(), {ok, G} = rabbit_misc:build_acyclic_graph( fun application_graph_vertex/2, fun application_graph_edge/2, [{App, case application:get_key(App, applications) of -- cgit v1.2.1 From 32331e5f2c86b0f08957a2353cf79be75d89e6ad Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 28 Jun 2011 16:57:23 +0100 Subject: factorisation --- src/rabbit.erl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 5ec0611a..2a7369c0 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -406,11 +406,9 @@ load_applications(Worklist, Loaded) -> Error -> throw(Error) end, load_applications( - case application:get_key(App, applications) of - undefined -> Worklist1; - {ok, Lst} -> queue:join(Worklist1, - queue:from_list(Lst)) - end, sets:add_element(App, Loaded)) + queue:join(Worklist1, + queue:from_list(app_dependencies(App))), + sets:add_element(App, Loaded)) end end. @@ -418,10 +416,7 @@ application_load_order() -> ok = load_applications(), {ok, G} = rabbit_misc:build_acyclic_graph( fun application_graph_vertex/2, fun application_graph_edge/2, - [{App, case application:get_key(App, applications) of - undefined -> []; - {ok, Lst} -> Lst - end} || + [{App, app_dependencies(App)} || {App, _Desc, _Vsn} <- application:loaded_applications()]), true = digraph:del_vertices( G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), @@ -429,6 +424,12 @@ application_load_order() -> true = digraph:delete(G), Result. +app_dependencies(App) -> + case application:get_key(App, applications) of + undefined -> []; + {ok, Lst} -> Lst + end. + application_graph_vertex(App, _Deps) -> [{App, App}]. application_graph_edge(App, Deps) -> [{Dep, App} || Dep <- Deps]. -- cgit v1.2.1 From 5e7e810d856ae05c069afb69bcd0a8ee2ff353e1 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 29 Jun 2011 10:20:43 +0100 Subject: cosmetic: give rabbit.erl some structure --- src/rabbit.erl | 310 +++++++++++++++++++++++++++++---------------------------- 1 file changed, 157 insertions(+), 153 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 2a7369c0..6ef816c0 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -23,7 +23,7 @@ -export([start/2, stop/1]). --export([log_location/1]). +-export([log_location/1]). %% for testing %%--------------------------------------------------------------------------- %% Boot steps. @@ -267,20 +267,51 @@ stop(_State) -> ok. %%--------------------------------------------------------------------------- +%% application life cycle -erts_version_check() -> - FoundVer = erlang:system_info(version), - case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of - true -> ok; - false -> {error, {erlang_version_too_old, - {found, FoundVer}, {required, ?ERTS_MINIMUM}}} +application_load_order() -> + ok = load_applications(), + {ok, G} = rabbit_misc:build_acyclic_graph( + fun (App, _Deps) -> [{App, App}] end, + fun (App, Deps) -> [{Dep, App} || Dep <- Deps] end, + [{App, app_dependencies(App)} || + {App, _Desc, _Vsn} <- application:loaded_applications()]), + true = digraph:del_vertices( + G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), + Result = digraph_utils:topsort(G), + true = digraph:delete(G), + Result. + +load_applications() -> + load_applications(queue:from_list(?APPS), sets:new()). 
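application_load_order/0 above boils down to a topological sort of the application dependency graph, pruned to the applications reachable from ?APPS, so that rabbit_misc:start_applications/1 sees every dependency before its dependents. A toy sketch of the same idea using the stdlib digraph modules directly (the dependency list below is invented for illustration; the real code goes through rabbit_misc:build_acyclic_graph/3 as shown above):

    %% Illustrative only -- a hand-rolled dependency list, not the real one.
    Deps = [{rabbit, [kernel, stdlib, sasl, mnesia, os_mon]},
            {mnesia, [kernel, stdlib]},
            {os_mon, [kernel, stdlib, sasl]},
            {sasl,   [kernel, stdlib]},
            {stdlib, [kernel]},
            {kernel, []}],
    G = digraph:new([acyclic]),
    [digraph:add_vertex(G, App)    || {App, _Ds} <- Deps],
    [digraph:add_edge(G, Dep, App) || {App, Ds} <- Deps, Dep <- Ds],
    StartOrder = digraph_utils:topsort(G),  %% deps first, e.g. kernel/stdlib ... rabbit last
    true = digraph:delete(G).

Deleting the digraph afterwards matters because digraphs are backed by ETS tables, which is exactly what the "Delete graph after use" change earlier in this series takes care of.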
+ +load_applications(Worklist, Loaded) -> + case queue:out(Worklist) of + {empty, _WorkList} -> + ok; + {{value, App}, Worklist1} -> + case sets:is_element(App, Loaded) of + true -> load_applications(Worklist1, Loaded); + false -> case application:load(App) of + ok -> ok; + {error, {already_loaded, App}} -> ok; + Error -> throw(Error) + end, + load_applications( + queue:join(Worklist1, + queue:from_list(app_dependencies(App))), + sets:add_element(App, Loaded)) + end end. -boot_error(Format, Args) -> - io:format("BOOT ERROR: " ++ Format, Args), - error_logger:error_msg(Format, Args), - timer:sleep(1000), - exit({?MODULE, failure_during_boot}). +app_dependencies(App) -> + case application:get_key(App, applications) of + undefined -> []; + {ok, Lst} -> Lst + end. + +%%--------------------------------------------------------------------------- +%% boot step logic run_boot_step({StepName, Attributes}) -> Description = case lists:keysearch(description, 1, Attributes) of @@ -355,129 +386,46 @@ sort_boot_steps(UnsortedSteps) -> end]) end. -%%--------------------------------------------------------------------------- - -log_location(Type) -> - case application:get_env(Type, case Type of - kernel -> error_logger; - sasl -> sasl_error_logger - end) of - {ok, {file, File}} -> File; - {ok, false} -> undefined; - {ok, tty} -> tty; - {ok, silent} -> undefined; - {ok, Bad} -> throw({error, {cannot_log_to_file, Bad}}); - _ -> undefined - end. - -app_location() -> - {ok, Application} = application:get_application(), - filename:absname(code:where_is_file(atom_to_list(Application) ++ ".app")). - -home_dir() -> - case init:get_argument(home) of - {ok, [[Home]]} -> Home; - Other -> Other - end. - -config_files() -> - case init:get_argument(config) of - {ok, Files} -> [filename:absname( - filename:rootname(File, ".config") ++ ".config") || - File <- Files]; - error -> [] - end. +boot_error(Format, Args) -> + io:format("BOOT ERROR: " ++ Format, Args), + error_logger:error_msg(Format, Args), + timer:sleep(1000), + exit({?MODULE, failure_during_boot}). %%--------------------------------------------------------------------------- +%% boot step functions -load_applications() -> - load_applications(queue:from_list(?APPS), sets:new()). - -load_applications(Worklist, Loaded) -> - case queue:out(Worklist) of - {empty, _WorkList} -> - ok; - {{value, App}, Worklist1} -> - case sets:is_element(App, Loaded) of - true -> load_applications(Worklist1, Loaded); - false -> case application:load(App) of - ok -> ok; - {error, {already_loaded, App}} -> ok; - Error -> throw(Error) - end, - load_applications( - queue:join(Worklist1, - queue:from_list(app_dependencies(App))), - sets:add_element(App, Loaded)) - end - end. +boot_delegate() -> + {ok, Count} = application:get_env(rabbit, delegate_count), + rabbit_sup:start_child(delegate_sup, [Count]). -application_load_order() -> - ok = load_applications(), - {ok, G} = rabbit_misc:build_acyclic_graph( - fun application_graph_vertex/2, fun application_graph_edge/2, - [{App, app_dependencies(App)} || - {App, _Desc, _Vsn} <- application:loaded_applications()]), - true = digraph:del_vertices( - G, digraph:vertices(G) -- digraph_utils:reachable(?APPS, G)), - Result = digraph_utils:topsort(G), - true = digraph:delete(G), - Result. +recover() -> + rabbit_binding:recover(rabbit_exchange:recover(), rabbit_amqqueue:start()). 
-app_dependencies(App) -> - case application:get_key(App, applications) of - undefined -> []; - {ok, Lst} -> Lst +maybe_insert_default_data() -> + case rabbit_mnesia:is_db_empty() of + true -> insert_default_data(); + false -> ok end. -application_graph_vertex(App, _Deps) -> [{App, App}]. - -application_graph_edge(App, Deps) -> [{Dep, App} || Dep <- Deps]. +insert_default_data() -> + {ok, DefaultUser} = application:get_env(default_user), + {ok, DefaultPass} = application:get_env(default_pass), + {ok, DefaultTags} = application:get_env(default_user_tags), + {ok, DefaultVHost} = application:get_env(default_vhost), + {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} = + application:get_env(default_permissions), + ok = rabbit_vhost:add(DefaultVHost), + ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass), + ok = rabbit_auth_backend_internal:set_tags(DefaultUser, DefaultTags), + ok = rabbit_auth_backend_internal:set_permissions(DefaultUser, DefaultVHost, + DefaultConfigurePerm, + DefaultWritePerm, + DefaultReadPerm), + ok. %%--------------------------------------------------------------------------- - -print_banner() -> - {ok, Product} = application:get_key(id), - {ok, Version} = application:get_key(vsn), - ProductLen = string:len(Product), - io:format("~n" - "+---+ +---+~n" - "| | | |~n" - "| | | |~n" - "| | | |~n" - "| +---+ +-------+~n" - "| |~n" - "| ~s +---+ |~n" - "| | | |~n" - "| ~s +---+ |~n" - "| |~n" - "+-------------------+~n" - "~s~n~s~n~s~n~n", - [Product, string:right([$v|Version], ProductLen), - ?PROTOCOL_VERSION, - ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]), - Settings = [{"node", node()}, - {"app descriptor", app_location()}, - {"home dir", home_dir()}, - {"config file(s)", config_files()}, - {"cookie hash", rabbit_misc:cookie_hash()}, - {"log", log_location(kernel)}, - {"sasl log", log_location(sasl)}, - {"database dir", rabbit_mnesia:dir()}, - {"erlang version", erlang:system_info(version)}], - DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]), - Format = fun (K, V) -> - io:format("~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", - [K, V]) - end, - lists:foreach(fun ({"config file(s)" = K, []}) -> - Format(K, "(none)"); - ({"config file(s)" = K, [V0 | Vs]}) -> - Format(K, V0), [Format("", V) || V <- Vs]; - ({K, V}) -> - Format(K, V) - end, Settings), - io:nl(). +%% logging ensure_working_log_handlers() -> Handlers = gen_event:which_handlers(error_logger), @@ -516,35 +464,19 @@ ensure_working_log_handler(OldFHandler, NewFHandler, TTYHandler, end end. -boot_delegate() -> - {ok, Count} = application:get_env(rabbit, delegate_count), - rabbit_sup:start_child(delegate_sup, [Count]). - -recover() -> - rabbit_binding:recover(rabbit_exchange:recover(), rabbit_amqqueue:start()). - -maybe_insert_default_data() -> - case rabbit_mnesia:is_db_empty() of - true -> insert_default_data(); - false -> ok +log_location(Type) -> + case application:get_env(Type, case Type of + kernel -> error_logger; + sasl -> sasl_error_logger + end) of + {ok, {file, File}} -> File; + {ok, false} -> undefined; + {ok, tty} -> tty; + {ok, silent} -> undefined; + {ok, Bad} -> throw({error, {cannot_log_to_file, Bad}}); + _ -> undefined end. 
-insert_default_data() -> - {ok, DefaultUser} = application:get_env(default_user), - {ok, DefaultPass} = application:get_env(default_pass), - {ok, DefaultTags} = application:get_env(default_user_tags), - {ok, DefaultVHost} = application:get_env(default_vhost), - {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} = - application:get_env(default_permissions), - ok = rabbit_vhost:add(DefaultVHost), - ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass), - ok = rabbit_auth_backend_internal:set_tags(DefaultUser, DefaultTags), - ok = rabbit_auth_backend_internal:set_permissions(DefaultUser, DefaultVHost, - DefaultConfigurePerm, - DefaultWritePerm, - DefaultReadPerm), - ok. - rotate_logs(File, Suffix, Handler) -> rotate_logs(File, Suffix, Handler, Handler). @@ -567,3 +499,75 @@ log_rotation_result(ok, {error, SaslLogError}) -> {error, {cannot_rotate_sasl_logs, SaslLogError}}; log_rotation_result(ok, ok) -> ok. + +%%--------------------------------------------------------------------------- +%% misc + +erts_version_check() -> + FoundVer = erlang:system_info(version), + case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of + true -> ok; + false -> {error, {erlang_version_too_old, + {found, FoundVer}, {required, ?ERTS_MINIMUM}}} + end. + +print_banner() -> + {ok, Product} = application:get_key(id), + {ok, Version} = application:get_key(vsn), + ProductLen = string:len(Product), + io:format("~n" + "+---+ +---+~n" + "| | | |~n" + "| | | |~n" + "| | | |~n" + "| +---+ +-------+~n" + "| |~n" + "| ~s +---+ |~n" + "| | | |~n" + "| ~s +---+ |~n" + "| |~n" + "+-------------------+~n" + "~s~n~s~n~s~n~n", + [Product, string:right([$v|Version], ProductLen), + ?PROTOCOL_VERSION, + ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]), + Settings = [{"node", node()}, + {"app descriptor", app_location()}, + {"home dir", home_dir()}, + {"config file(s)", config_files()}, + {"cookie hash", rabbit_misc:cookie_hash()}, + {"log", log_location(kernel)}, + {"sasl log", log_location(sasl)}, + {"database dir", rabbit_mnesia:dir()}, + {"erlang version", erlang:system_info(version)}], + DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]), + Format = fun (K, V) -> + io:format("~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", + [K, V]) + end, + lists:foreach(fun ({"config file(s)" = K, []}) -> + Format(K, "(none)"); + ({"config file(s)" = K, [V0 | Vs]}) -> + Format(K, V0), [Format("", V) || V <- Vs]; + ({K, V}) -> + Format(K, V) + end, Settings), + io:nl(). + +app_location() -> + {ok, Application} = application:get_application(), + filename:absname(code:where_is_file(atom_to_list(Application) ++ ".app")). + +home_dir() -> + case init:get_argument(home) of + {ok, [[Home]]} -> Home; + Other -> Other + end. + +config_files() -> + case init:get_argument(config) of + {ok, Files} -> [filename:absname( + filename:rootname(File, ".config") ++ ".config") || + File <- Files]; + error -> [] + end. -- cgit v1.2.1 From 5a019a63958f75164beffefc626094a6c41bea2c Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 29 Jun 2011 11:35:49 +0100 Subject: Spec --- src/rabbit_exchange.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 0c335463..be88fba3 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -58,6 +58,9 @@ (name()) -> rabbit_types:exchange() | rabbit_types:channel_exit()). -spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]). 
+-spec(update/2 :: + (name(), fun((rabbit_types:exchange()) -> rabbit_types:exchange())) + -> rabbit_types:ok_or_error(term())). -spec(info_keys/0 :: () -> rabbit_types:info_keys()). -spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()). -spec(info/2 :: -- cgit v1.2.1 From 2238eacec12a34849fafc68946391b5fa74c37bf Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 29 Jun 2011 12:08:49 +0100 Subject: - use rabbit_misc:execute_mnesia_transaction - call the function update_scratch and only update the scratch space - call Fun(X) once - make 'read' into a 'wread' --- src/rabbit_exchange.erl | 42 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index be88fba3..0afd2c5a 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -20,7 +20,7 @@ -export([recover/0, callback/3, declare/6, assert_equivalence/6, assert_args_equivalence/2, check_type/1, - lookup/1, lookup_or_die/1, list/1, update/2, + lookup/1, lookup_or_die/1, list/1, update_scratch/2, info_keys/0, info/1, info/2, info_all/1, info_all/2, publish/2, delete/2]). %% these must be run inside a mnesia tx @@ -58,9 +58,7 @@ (name()) -> rabbit_types:exchange() | rabbit_types:channel_exit()). -spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]). --spec(update/2 :: - (name(), fun((rabbit_types:exchange()) -> rabbit_types:exchange())) - -> rabbit_types:ok_or_error(term())). +-spec(update_scratch/2 :: (name(), fun((term()) -> term())) -> 'ok'). -spec(info_keys/0 :: () -> rabbit_types:info_keys()). -spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()). -spec(info/2 :: @@ -202,26 +200,22 @@ list(VHostPath) -> rabbit_exchange, #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'}). -update(Name, Fun) -> - case mnesia:transaction( - fun() -> - case mnesia:read(rabbit_exchange, Name, write) of - [X = #exchange{durable = Durable}] -> - ok = mnesia:write(rabbit_exchange, Fun(X), write), - case Durable of - true -> - ok = mnesia:write(rabbit_durable_exchange, - Fun(X), write); - _ -> - ok - end; - [] -> - ok - end - end) of - {atomic, ok} -> ok; - {aborted, Reason} -> {error, Reason} - end. +update_scratch(Name, Fun) -> + rabbit_misc:execute_mnesia_transaction( + fun() -> + case mnesia:wread({rabbit_exchange, Name}) of + [X = #exchange{durable = Durable, scratch = Scratch}] -> + X1 = X#exchange{scratch = Fun(Scratch)}, + ok = mnesia:write(rabbit_exchange, X1, write), + case Durable of + true -> ok = mnesia:write(rabbit_durable_exchange, + X1, write); + _ -> ok + end; + [] -> + ok + end + end). info_keys() -> ?INFO_KEYS. 
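The scratch field added to #exchange{} gives an exchange-type plugin one slot of private, per-exchange state, and update_scratch/2 is the transactional way to modify it: it deliberately touches only that field and keeps rabbit_durable_exchange in step for durable exchanges. A hypothetical caller might look like the sketch below; the state-building functions are invented for illustration:

    %% Purely illustrative: a plugin refreshing its per-exchange scratch state.
    %% New and freshly upgraded exchanges start with scratch = undefined.
    XName = rabbit_misc:r(<<"/">>, exchange, <<"my-exchange">>),
    ok = rabbit_exchange:update_scratch(
           XName,
           fun (undefined) -> initial_plugin_state();    %% hypothetical helper
               (Old)       -> refresh_plugin_state(Old)  %% hypothetical helper
           end).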
-- cgit v1.2.1 From 61fc23804e307dc4715565799fac96eb7b84aa84 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 1 Jul 2011 16:08:19 +0100 Subject: Better code coverage --- quickcheck | 12 ++++++---- src/rabbit_backing_queue_qc.erl | 49 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 52 insertions(+), 9 deletions(-) diff --git a/quickcheck b/quickcheck index 24edae70..a36cf3ed 100755 --- a/quickcheck +++ b/quickcheck @@ -8,8 +8,9 @@ %% ModStr is the module containing quickcheck properties %% The number of trials is optional main([NodeStr, ModStr | TrialsStr]) -> - Node = list_to_atom(NodeStr ++ "@" ++ net_adm:localhost()), - Mod = list_to_atom(ModStr), + {ok, Hostname} = inet:gethostname(), + Node = list_to_atom(NodeStr ++ "@" ++ Hostname), + Mod = list_to_atom(ModStr), Trials = lists:map(fun erlang:list_to_integer/1, TrialsStr), case rpc:call(Node, code, ensure_loaded, [proper]) of {module, proper} -> @@ -17,8 +18,11 @@ main([NodeStr, ModStr | TrialsStr]) -> [] -> ok; _ -> quit(1) end; - _ -> - io:format("PropEr module not present on node ~p.~n", [Node]), + {badrpc, Reason} -> + io:format("Could not contact node ~p: ~p.~n", [Node, Reason]), + quit(2); + {error,nofile} -> + io:format("Module PropEr was not found on node ~p~n", [Node]), quit(2) end; main([]) -> diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index c0938be3..94e1ef3a 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -78,12 +78,14 @@ prop_backing_queue_test() -> command(S) -> ?SIZED(Size, frequency([{Size, qc_publish(S)}, + {Size, qc_publish_delivered(S)}, {Size, qc_fetch(S)}, {Size, qc_ack(S)}, {Size, qc_requeue(S)}, {Size, qc_ram(S)}, {Size, qc_drain_confirmed(S)}, {Size, qc_dropwhile(S)}, + {Size, qc_is_empty(S)}, {1, qc_purge(S)}])). qc_publish(#state{bqstate = BQ}) -> @@ -94,6 +96,10 @@ qc_publish(#state{bqstate = BQ}) -> expiry = choose(0, 10)}, self(), BQ]}. +qc_publish_delivered(#state{bqstate = BQ}) -> + {call, ?BQMOD, publish_delivered, + [boolean(), qc_message(), #message_properties{}, self(), BQ]}. + qc_fetch(#state{bqstate = BQ}) -> {call, ?BQMOD, fetch, [boolean(), BQ]}. @@ -113,6 +119,9 @@ qc_drain_confirmed(#state{bqstate = BQ}) -> qc_dropwhile(#state{bqstate = BQ}) -> {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}. +qc_is_empty(#state{bqstate = BQ}) -> + {call, ?BQMOD, is_empty, [BQ]}. + qc_purge(#state{bqstate = BQ}) -> {call, ?BQMOD, purge, [BQ]}. @@ -121,6 +130,9 @@ qc_purge(#state{bqstate = BQ}) -> precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg}) when Fun =:= ack; Fun =:= requeue -> length(Acks) > 0; +precondition(#state{messages = Messages}, + {call, ?BQMOD, publish_delivered, _Arg}) -> + queue:is_empty(Messages); precondition(_S, {call, ?BQMOD, _Fun, _Arg}) -> true. 
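Some context on the new publish_delivered command: in the backing queue API, publish_delivered is the path taken when a message can be handed straight to a waiting consumer without ever being queued, which is presumably why the precondition above only permits it while the model's queue is empty. The call shape the generator exercises, and that the corresponding next_state clause below picks apart with element/2, is roughly:

    %% Sketch of the call exercised by qc_publish_delivered/1; AckRequired, Msg,
    %% MsgProps and BQ come from the generators and model state above.
    {AckTag, BQ1} = rabbit_variable_queue:publish_delivered(AckRequired, Msg,
                                                            MsgProps, self(), BQ).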
@@ -132,13 +144,33 @@ next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Pid, _BQ]}) -> NeedsConfirm = {call, erlang, element, [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]}, - Confirms1 = case eval(NeedsConfirm) of - true -> gb_sets:add(MsgId, Confirms); - _ -> Confirms - end, S#state{bqstate = BQ, messages = queue:in({MsgProps, Msg}, Messages), - confirms = Confirms1}; + confirms = Confirms1 = case eval(NeedsConfirm) of + true -> gb_sets:add(MsgId, Confirms); + _ -> Confirms + end}; + +next_state(S, Res, + {call, ?BQMOD, publish_delivered, + [AckReq, Msg, MsgProps, _Pid, _BQ]}) -> + #state{confirms = Confirms, acks = Acks} = S, + AckTag = {call, erlang, element, [1, Res]}, + BQ1 = {call, erlang, element, [2, Res]}, + MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]}, + NeedsConfirm = + {call, erlang, element, + [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]}, + S#state{bqstate = BQ1, + confirms = case eval(NeedsConfirm) of + true -> gb_sets:add(MsgId, Confirms); + _ -> Confirms + end, + acks = Acks ++ case AckReq of + true -> [{AckTag, {MsgProps, Msg}}]; + false -> [] + end + }; next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> #state{messages = Messages, acks = Acks} = S, @@ -182,6 +214,9 @@ next_state(S, BQ1, {call, ?BQMOD, dropwhile, _Args}) -> #state{messages = Messages} = S, S#state{bqstate = BQ1, messages = drop_messages(Messages)}; +next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) -> + S; + next_state(S, Res, {call, ?BQMOD, purge, _Args}) -> BQ1 = {call, erlang, element, [2, Res]}, S#state{bqstate = BQ1, messages = queue:new()}. @@ -201,6 +236,10 @@ postcondition(#state{messages = Messages}, {call, ?BQMOD, purge, _Args}, Res) -> {PurgeCount, _BQ} = Res, queue:len(Messages) =:= PurgeCount; +postcondition(#state{messages = Messages}, + {call, ?BQMOD, is_empty, _Args}, Res) -> + (queue:len(Messages) =:= 0) =:= Res; + postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) -> #state{confirms = Confirms} = S, {ReportedConfirmed, _BQ} = Res, -- cgit v1.2.1 From 7d098036e5d07899fb0a4e9590107d5fd27c57a7 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 1 Jul 2011 16:46:18 +0100 Subject: cosmetic: we tend to prefer 'any()' over 'term()' --- src/rabbit_exchange.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index 0afd2c5a..a3300a59 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -58,7 +58,7 @@ (name()) -> rabbit_types:exchange() | rabbit_types:channel_exit()). -spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]). --spec(update_scratch/2 :: (name(), fun((term()) -> term())) -> 'ok'). +-spec(update_scratch/2 :: (name(), fun((any()) -> any())) -> 'ok'). -spec(info_keys/0 :: () -> rabbit_types:info_keys()). -spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()). -spec(info/2 :: -- cgit v1.2.1 From 2482a9bf4edd64b17f50aa31f600d1bf7c9ed361 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 1 Jul 2011 18:40:22 +0100 Subject: cosmetic: move docs of confirm-related channel info items to better place --- docs/rabbitmqctl.1.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index a0f03192..e733792f 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1199,6 +1199,10 @@ transactional True if the channel is in transactional mode, false otherwise. 
+ + confirm + True if the channel is in confirm mode, false otherwise. + consumer_count Number of logical AMQP consumers retrieving messages via @@ -1214,6 +1218,12 @@ Number of acknowledgements received in an as yet uncommitted transaction. + + messages_unconfirmed + Number of published messages not yet + confirmed. On channels not in confirm mode, this + remains 0. + prefetch_count QoS prefetch count limit in force, 0 if unlimited. @@ -1226,16 +1236,6 @@ messages to the channel's consumers. - - confirm - True if the channel is in confirm mode, false otherwise. - - - messages_unconfirmed - Number of published messages not yet - confirmed. On channels not in confirm mode, this - remains 0. - If no channelinfoitems are specified then pid, -- cgit v1.2.1 From b8f5f15df883bbed2e575ca74db865453eac525d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 1 Jul 2011 18:42:01 +0100 Subject: remove 'transactional' flag from 'rabbitmqctl list_channel' --- docs/rabbitmqctl.1.xml | 4 ++-- src/rabbit_control.erl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index e733792f..71764522 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1238,8 +1238,8 @@ - If no channelinfoitems are specified then pid, - user, transactional, consumer_count, and + If no channelinfoitems are specified + then pid, user, consumer_count, and messages_unacknowledged are assumed. diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 9eef384a..6eb1aaba 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -301,7 +301,7 @@ action(list_connections, Node, Args, _Opts, Inform) -> action(list_channels, Node, Args, _Opts, Inform) -> Inform("Listing channels", []), - ArgAtoms = default_if_empty(Args, [pid, user, transactional, consumer_count, + ArgAtoms = default_if_empty(Args, [pid, user, consumer_count, messages_unacknowledged]), display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]), ArgAtoms); -- cgit v1.2.1 From feee4949ef59a2c7b6c65c94fff145321ee6a94a Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Sat, 2 Jul 2011 10:54:23 +0100 Subject: cosmetic: more sensible order of qpids for routing --- src/rabbit_router.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl index 26780676..d453a870 100644 --- a/src/rabbit_router.erl +++ b/src/rabbit_router.erl @@ -111,7 +111,7 @@ lookup_qpids(QNames) -> lists:foldl(fun (QName, QPids) -> case mnesia:dirty_read({rabbit_queue, QName}) of [#amqqueue{pid = QPid, slave_pids = SPids}] -> - SPids ++ [QPid | QPids]; + [QPid | SPids ++ QPids]; [] -> QPids end -- cgit v1.2.1 From 1d167ee737a77aab2ebe9b0edd983a9220ea5a69 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Mon, 4 Jul 2011 10:38:30 +0100 Subject: change tx semantics to 'batching' We keep track of uncommitted messages and acks in the channel. All routing decisions are made instantly, which means errors are detected straight away. We increment pub/ack stats on commit only. --- docs/rabbitmqctl.1.xml | 16 +++++++- src/rabbit_basic.erl | 3 +- src/rabbit_channel.erl | 100 +++++++++++++++++++++++++++++++++++++----------- src/rabbit_exchange.erl | 20 +++++----- 4 files changed, 103 insertions(+), 36 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index fdb49912..93c3fcd8 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1217,7 +1217,21 @@ messages to the channel's consumers. 
- + + transactional + True if the channel is in transactional mode, false otherwise. + + + messages_uncommitted + Number of messages received in an as yet + uncommitted transaction. + + + acks_uncommitted + Number of acknowledgements received in an as yet + uncommitted transaction. + + confirm True if the channel is in confirm mode, false otherwise. diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl index ec8ed351..9cc406e7 100644 --- a/src/rabbit_basic.erl +++ b/src/rabbit_basic.erl @@ -170,7 +170,8 @@ publish(XName, RKey, Mandatory, Immediate, Props, Body) -> end. publish(X, Delivery) -> - {RoutingRes, DeliveredQPids} = rabbit_exchange:publish(X, Delivery), + {RoutingRes, DeliveredQPids} = + rabbit_router:deliver(rabbit_exchange:route(X, Delivery), Delivery), {ok, RoutingRes, DeliveredQPids}. is_message_persistent(#content{properties = #'P_basic'{ diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index f0f8c4dd..df337aef 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -30,7 +30,8 @@ prioritise_cast/2]). -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, - limiter_pid, start_limiter_fun, next_tag, unacked_message_q, + limiter_pid, start_limiter_fun, tx_enabled, next_tag, + unacked_message_q, uncommitted_message_q, uncommitted_ack_q, user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq, @@ -40,10 +41,13 @@ -define(STATISTICS_KEYS, [pid, + transactional, confirm, consumer_count, messages_unacknowledged, messages_unconfirmed, + messages_uncommitted, + acks_uncommitted, prefetch_count, client_flow_blocked]). @@ -170,8 +174,11 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, conn_pid = ConnPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, + tx_enabled = false, next_tag = 1, unacked_message_q = queue:new(), + uncommitted_message_q = queue:new(), + uncommitted_ack_q = queue:new(), user = User, virtual_host = VHost, most_recently_declared_queue = <<>>, @@ -595,6 +602,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, mandatory = Mandatory, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, + tx_enabled = TxEnabled, confirm_enabled = ConfirmEnabled, trace_state = TraceState}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), @@ -614,16 +622,15 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> rabbit_trace:tap_trace_in(Message, TraceState), - {RoutingRes, DeliveredQPids} = - rabbit_exchange:publish( - Exchange, rabbit_basic:delivery(Mandatory, Immediate, Message, - MsgSeqNo)), - State2 = process_routing_result(RoutingRes, DeliveredQPids, - ExchangeName, MsgSeqNo, Message, - State1), - maybe_incr_stats([{ExchangeName, 1} | - [{{QPid, ExchangeName}, 1} || - QPid <- DeliveredQPids]], publish, State2), + Delivery = rabbit_basic:delivery(Mandatory, Immediate, Message, + MsgSeqNo), + QNames = rabbit_exchange:route(Exchange, Delivery), + State2 = case TxEnabled of + true -> TMQ = State1#ch.uncommitted_message_q, + NewTMQ = queue:in({Delivery, QNames}, TMQ), + State1#ch{uncommitted_message_q = NewTMQ}; + false -> deliver_to_queues({Delivery, QNames}, State1) + end, {noreply, State2}; {error, Reason} -> rabbit_misc:protocol_error(precondition_failed, @@ -638,12 +645,16 @@ handle_method(#'basic.nack'{delivery_tag = DeliveryTag, 
handle_method(#'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple}, - _, State = #ch{unacked_message_q = UAMQ}) -> + _, State = #ch{unacked_message_q = UAMQ, + tx_enabled = TxEnabled}) -> {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple), - QIncs = ack(Acked), - maybe_incr_stats(QIncs, ack, State), - ok = notify_limiter(State#ch.limiter_pid, Acked), - {noreply, State#ch{unacked_message_q = Remaining}}; + State1 = State#ch{unacked_message_q = Remaining}, + {noreply, case TxEnabled of + true -> NewTAQ = queue:join(State1#ch.uncommitted_ack_q, + Acked), + State1#ch{uncommitted_ack_q = NewTAQ}; + false -> ack(Acked, State1) + end}; handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, @@ -1024,6 +1035,26 @@ handle_method(#'queue.purge'{queue = QueueNameBin, return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); +handle_method(#'tx.select'{}, _, State) -> + {reply, #'tx.select_ok'{}, State#ch{tx_enabled = true}}; + +handle_method(#'tx.commit'{}, _, #ch{tx_enabled = false}) -> + rabbit_misc:protocol_error( + precondition_failed, "channel is not transactional", []); + +handle_method(#'tx.commit'{}, _, State = #ch{uncommitted_message_q = TMQ, + uncommitted_ack_q = TAQ}) -> + State1 = rabbit_misc:queue_fold(fun deliver_to_queues/2, State, TMQ), + {reply, #'tx.commit_ok'{}, new_tx(ack(TAQ, State1))}; + +handle_method(#'tx.rollback'{}, _, #ch{tx_enabled = false}) -> + rabbit_misc:protocol_error( + precondition_failed, "channel is not transactional", []); + +handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ, + uncommitted_ack_q = TAQ}) -> + {reply, #'tx.rollback_ok'{}, new_tx(State#ch{unacked_message_q = + queue:join(TAQ, UAMQ)})}; handle_method(#'confirm.select'{nowait = NoWait}, _, State) -> return_ok(State#ch{confirm_enabled = true}, @@ -1200,11 +1231,18 @@ collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) -> precondition_failed, "unknown delivery tag ~w", [DeliveryTag]) end. -ack(UAQ) -> - fold_per_queue(fun (QPid, MsgIds, L) -> - ok = rabbit_amqqueue:ack(QPid, MsgIds, self()), - [{QPid, length(MsgIds)} | L] - end, [], UAQ). +ack(Acked, State) -> + QIncs = fold_per_queue( + fun (QPid, MsgIds, L) -> + ok = rabbit_amqqueue:ack(QPid, MsgIds, self()), + [{QPid, length(MsgIds)} | L] + end, [], Acked), + maybe_incr_stats(QIncs, ack, State), + ok = notify_limiter(State#ch.limiter_pid, Acked), + State. + +new_tx(State) -> State#ch{uncommitted_message_q = queue:new(), + uncommitted_ack_q = queue:new()}. notify_queues(State = #ch{state = closing}) -> {ok, State}; @@ -1255,6 +1293,18 @@ notify_limiter(LimiterPid, Acked) -> Count -> rabbit_limiter:ack(LimiterPid, Count) end. +deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{ + exchange_name = XName}, + msg_seq_no = MsgSeqNo}, + QNames}, State) -> + {RoutingRes, DeliveredQPids} = rabbit_router:deliver(QNames, Delivery), + State1 = process_routing_result(RoutingRes, DeliveredQPids, + XName, MsgSeqNo, Message, State), + maybe_incr_stats([{XName, 1} | + [{{QPid, XName}, 1} || + QPid <- DeliveredQPids]], publish, State1), + State1. 
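The tx.commit handler above replays the batched work by folding deliver_to_queues/2 over uncommitted_message_q with rabbit_misc:queue_fold/3, which is not shown in this patch. Assuming queue_fold folds front-to-rear like lists:foldl over a list, the publish half of the commit is equivalent to the sketch below; pushing the per-queue stats increment down into deliver_to_queues/2 is what makes publish/ack stats count only on commit, as the commit message says.

    %% Rough equivalent of the tx.commit publish replay (TMQ is the channel's
    %% uncommitted_message_q of {Delivery, QNames} pairs built up by basic.publish).
    State1 = lists:foldl(fun deliver_to_queues/2, State, queue:to_list(TMQ)).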
+ process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_route), maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], @@ -1262,8 +1312,7 @@ process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) -> record_confirm(MsgSeqNo, XName, State); process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) -> ok = basic_return(Msg, State, no_consumers), - maybe_incr_stats([{Msg#basic_message.exchange_name, 1}], - return_not_delivered, State), + maybe_incr_stats([{XName, 1}], return_not_delivered, State), record_confirm(MsgSeqNo, XName, State); process_routing_result(routed, [], XName, MsgSeqNo, _, State) -> record_confirm(MsgSeqNo, XName, State); @@ -1343,6 +1392,7 @@ i(connection, #ch{conn_pid = ConnPid}) -> ConnPid; i(number, #ch{channel = Channel}) -> Channel; i(user, #ch{user = User}) -> User#user.username; i(vhost, #ch{virtual_host = VHost}) -> VHost; +i(transactional, #ch{tx_enabled = TE}) -> TE; i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); @@ -1350,6 +1400,10 @@ i(messages_unconfirmed, #ch{unconfirmed_mq = UMQ}) -> gb_trees:size(UMQ); i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> queue:len(UAMQ); +i(messages_uncommitted, #ch{uncommitted_message_q = TMQ}) -> + queue:len(TMQ); +i(acks_uncommitted, #ch{uncommitted_ack_q = TAQ}) -> + queue:len(TAQ); i(prefetch_count, #ch{limiter_pid = LimiterPid}) -> rabbit_limiter:get_limit(LimiterPid); i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) -> diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl index cab1b99f..cecd879a 100644 --- a/src/rabbit_exchange.erl +++ b/src/rabbit_exchange.erl @@ -22,7 +22,7 @@ assert_equivalence/6, assert_args_equivalence/2, check_type/1, lookup/1, lookup_or_die/1, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, - publish/2, delete/2]). + route/2, delete/2]). %% these must be run inside a mnesia tx -export([maybe_auto_delete/1, serial/1, peek_serial/1]). @@ -66,8 +66,8 @@ -spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]). -spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys()) -> [rabbit_types:infos()]). --spec(publish/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) - -> {rabbit_router:routing_result(), [pid()]}). +-spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery()) + -> [rabbit_amqqueue:name()]). -spec(delete/2 :: (name(), boolean())-> 'ok' | rabbit_types:error('not_found') | @@ -224,21 +224,19 @@ info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end). info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end). -publish(X = #exchange{name = XName}, Delivery) -> - rabbit_router:deliver( - route(Delivery, {queue:from_list([X]), XName, []}), - Delivery). +route(X = #exchange{name = XName}, Delivery) -> + route1(Delivery, {queue:from_list([X]), XName, []}). -route(Delivery, {WorkList, SeenXs, QNames}) -> +route1(Delivery, {WorkList, SeenXs, QNames}) -> case queue:out(WorkList) of {empty, _WorkList} -> lists:usort(QNames); {{value, X = #exchange{type = Type}}, WorkList1} -> DstNames = process_alternate( X, ((type_to_module(Type)):route(X, Delivery))), - route(Delivery, - lists:foldl(fun process_route/2, {WorkList1, SeenXs, QNames}, - DstNames)) + route1(Delivery, + lists:foldl(fun process_route/2, {WorkList1, SeenXs, QNames}, + DstNames)) end. 
process_alternate(#exchange{name = XName, arguments = Args}, []) -> -- cgit v1.2.1 From a6f2674e3fb810bfd8a52d388dde5fa95cdfbb20 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 5 Jul 2011 08:53:42 +0100 Subject: confirm on tx.rollback --- src/rabbit_channel.erl | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index df337aef..81a0ee80 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -516,6 +516,10 @@ queue_blocked(QPid, State = #ch{blocking = Blocking}) -> State#ch{blocking = Blocking1} end. +blind_confirm({#delivery{message = #basic_message{exchange_name = XName}, + msg_seq_no = MsgSeqNo}, _QNames}, State) -> + record_confirm(MsgSeqNo, XName, State). + record_confirm(undefined, _, State) -> State; record_confirm(MsgSeqNo, XName, State) -> @@ -1051,10 +1055,12 @@ handle_method(#'tx.rollback'{}, _, #ch{tx_enabled = false}) -> rabbit_misc:protocol_error( precondition_failed, "channel is not transactional", []); -handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ, - uncommitted_ack_q = TAQ}) -> - {reply, #'tx.rollback_ok'{}, new_tx(State#ch{unacked_message_q = - queue:join(TAQ, UAMQ)})}; +handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ, + uncommitted_message_q = TMQ, + uncommitted_ack_q = TAQ}) -> + State1 = rabbit_misc:queue_fold(fun blind_confirm/2, State, TMQ), + {reply, #'tx.rollback_ok'{}, new_tx(State1#ch{unacked_message_q = + queue:join(TAQ, UAMQ)})}; handle_method(#'confirm.select'{nowait = NoWait}, _, State) -> return_ok(State#ch{confirm_enabled = true}, -- cgit v1.2.1 From 736f928e51790d8a4026978d73f8c453bdf68e94 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 5 Jul 2011 15:52:33 +0100 Subject: Improve code coverage --- Makefile | 1 + src/rabbit_backing_queue_qc.erl | 119 ++++++++++++++++++++++++++++------------ 2 files changed, 85 insertions(+), 35 deletions(-) diff --git a/Makefile b/Makefile index 04f74fda..495689fb 100644 --- a/Makefile +++ b/Makefile @@ -329,3 +329,4 @@ ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" "" -include $(DEPS_FILE) endif +.PHONY: run-qc diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index 94e1ef3a..f018a4b8 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -23,6 +23,7 @@ -behaviour(proper_statem). -define(BQMOD, rabbit_variable_queue). +-define(QUEUE_MAXLEN, 10000). -define(RECORD_INDEX(Key, Record), erlang:element(2, proplists:lookup(Key, lists:zip( @@ -31,9 +32,10 @@ -export([initial_state/0, command/1, precondition/2, postcondition/3, next_state/3]). --export([prop_backing_queue_test/0]). +-export([prop_backing_queue_test/0, publish_multiple/4]). -record(state, {bqstate, + len, %% int messages, %% queue of {msg_props, basic_msg} acks, %% list of {acktag, {message_props, basic_msg}} confirms}). %% set of msgid @@ -42,6 +44,7 @@ initial_state() -> #state{bqstate = qc_variable_queue_init(qc_test_queue()), + len = 0, messages = queue:new(), acks = [], confirms = gb_sets:new()}. @@ -76,17 +79,19 @@ prop_backing_queue_test() -> %% Commands command(S) -> - ?SIZED(Size, - frequency([{Size, qc_publish(S)}, - {Size, qc_publish_delivered(S)}, - {Size, qc_fetch(S)}, - {Size, qc_ack(S)}, - {Size, qc_requeue(S)}, - {Size, qc_ram(S)}, - {Size, qc_drain_confirmed(S)}, - {Size, qc_dropwhile(S)}, - {Size, qc_is_empty(S)}, - {1, qc_purge(S)}])). 
+ frequency([{10, qc_publish(S)}, + {1, qc_publish_delivered(S)}, + {1, qc_publish_multiple(S)}, + {15, qc_fetch(S)}, + {15, qc_ack(S)}, + {15, qc_requeue(S)}, + {3, qc_set_ram_duration_target(S)}, + {1, qc_ram_duration(S)}, + {1, qc_drain_confirmed(S)}, + {1, qc_dropwhile(S)}, + {1, qc_is_empty(S)}, + {1, qc_needs_timeout(S)}, + {5, qc_purge(S)}]). qc_publish(#state{bqstate = BQ}) -> {call, ?BQMOD, publish, @@ -96,6 +101,11 @@ qc_publish(#state{bqstate = BQ}) -> expiry = choose(0, 10)}, self(), BQ]}. +qc_publish_multiple(#state{bqstate = BQ}) -> + {call, ?MODULE, publish_multiple, + [qc_message(), #message_properties{}, BQ, + resize(?QUEUE_MAXLEN, pos_integer())]}. + qc_publish_delivered(#state{bqstate = BQ}) -> {call, ?BQMOD, publish_delivered, [boolean(), qc_message(), #message_properties{}, self(), BQ]}. @@ -110,8 +120,12 @@ qc_requeue(#state{bqstate = BQ, acks = Acks}) -> {call, ?BQMOD, requeue, [rand_choice(proplists:get_keys(Acks)), fun(MsgOpts) -> MsgOpts end, BQ]}. -qc_ram(#state{bqstate = BQ}) -> - {call, ?BQMOD, set_ram_duration_target, [oneof([0, infinity]), BQ]}. +qc_set_ram_duration_target(#state{bqstate = BQ}) -> + {call, ?BQMOD, set_ram_duration_target, + [oneof([0, 1, 100, resize(1000, pos_integer()), infinity]), BQ]}. + +qc_ram_duration(#state{bqstate = BQ}) -> + {call, ?BQMOD, ram_duration, [BQ]}. qc_drain_confirmed(#state{bqstate = BQ}) -> {call, ?BQMOD, drain_confirmed, [BQ]}. @@ -122,6 +136,9 @@ qc_dropwhile(#state{bqstate = BQ}) -> qc_is_empty(#state{bqstate = BQ}) -> {call, ?BQMOD, is_empty, [BQ]}. +qc_needs_timeout(#state{bqstate = BQ}) -> + {call, ?BQMOD, needs_timeout, [BQ]}. + qc_purge(#state{bqstate = BQ}) -> {call, ?BQMOD, purge, [BQ]}. @@ -134,22 +151,34 @@ precondition(#state{messages = Messages}, {call, ?BQMOD, publish_delivered, _Arg}) -> queue:is_empty(Messages); precondition(_S, {call, ?BQMOD, _Fun, _Arg}) -> - true. + true; +precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) -> + Len < ?QUEUE_MAXLEN. 
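These generators and preconditions are driven by a proper_statem property. The property body falls outside this hunk, but the usual shape of such a driver -- which is what the "make run-qc" target ends up running on the broker node via the quickcheck escript's rpc call to proper:module -- is roughly the skeleton below. This is illustrative only, not the module's actual prop_backing_queue_test/0, which may differ in detail (for instance in how it sets up and tears down the backing queue around run_commands/2):

    %% Illustrative proper_statem driver skeleton.
    prop_example() ->
        ?FORALL(Cmds, proper_statem:commands(?MODULE),
                begin
                    {_History, _FinalState, Result} =
                        proper_statem:run_commands(?MODULE, Cmds),
                    Result =:= ok
                end).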
%% Model updates next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Pid, _BQ]}) -> - #state{messages = Messages, confirms = Confirms} = S, + #state{len = Len, messages = Messages, confirms = Confirms} = S, MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]}, NeedsConfirm = {call, erlang, element, [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]}, S#state{bqstate = BQ, + len = Len + 1, messages = queue:in({MsgProps, Msg}, Messages), - confirms = Confirms1 = case eval(NeedsConfirm) of - true -> gb_sets:add(MsgId, Confirms); - _ -> Confirms - end}; + confirms = case eval(NeedsConfirm) of + true -> gb_sets:add(MsgId, Confirms); + _ -> Confirms + end}; + +next_state(S, BQ, {call, _, publish_multiple, [Msg, MsgProps, _BQ, Count]}) -> + #state{len = Len, messages = Messages} = S, + Messages1 = repeat(Messages, fun(Msgs) -> + queue:in({MsgProps, Msg}, Msgs) + end, Count), + S#state{bqstate = BQ, + len = Len + Count, + messages = Messages1}; next_state(S, Res, {call, ?BQMOD, publish_delivered, @@ -173,7 +202,7 @@ next_state(S, Res, }; next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> - #state{messages = Messages, acks = Acks} = S, + #state{len = Len, messages = Messages, acks = Acks} = S, ResultInfo = {call, erlang, element, [1, Res]}, BQ1 = {call, erlang, element, [2, Res]}, AckTag = {call, erlang, element, [3, ResultInfo]}, @@ -182,7 +211,7 @@ next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> {empty, _M2} -> S1; {{value, MsgProp_Msg}, M2} -> - S2 = S1#state{messages = M2}, + S2 = S1#state{len = Len - 1, messages = M2}, case AckReq of true -> S2#state{acks = Acks ++ [{AckTag, MsgProp_Msg}]}; false -> S2 @@ -196,62 +225,82 @@ next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) -> acks = propvals_by_keys(AcksState, AcksArg)}; next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> - #state{messages = Messages, acks = AcksState} = S, + #state{len = Len, messages = Messages, acks = AcksState} = S, BQ1 = {call, erlang, element, [2, Res]}, RequeueMsgs = [proplists:get_value(Key, AcksState) || Key <- AcksArg], S#state{bqstate = BQ1, + len = Len + length(RequeueMsgs), messages = queue:join(Messages, queue:from_list(RequeueMsgs)), acks = propvals_by_keys(AcksState, AcksArg)}; next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) -> S#state{bqstate = BQ}; +next_state(S, Res, {call, ?BQMOD, ram_duration, _Args}) -> + BQ1 = {call, erlang, element, [2, Res]}, + S#state{bqstate = BQ1}; + next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) -> BQ1 = {call, erlang, element, [2, Res]}, S#state{bqstate = BQ1}; next_state(S, BQ1, {call, ?BQMOD, dropwhile, _Args}) -> #state{messages = Messages} = S, - S#state{bqstate = BQ1, messages = drop_messages(Messages)}; + Messages1 = drop_messages(Messages), + S#state{bqstate = BQ1, len = queue:len(Messages1), messages = Messages1}; next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) -> S; +next_state(S, _Res, {call, ?BQMOD, needs_timeout, _Args}) -> + S; + next_state(S, Res, {call, ?BQMOD, purge, _Args}) -> BQ1 = {call, erlang, element, [2, Res]}, - S#state{bqstate = BQ1, messages = queue:new()}. + S#state{bqstate = BQ1, len = 0, messages = queue:new()}. 
%% Postconditions -postcondition(#state{messages = Messages}, {call, ?BQMOD, fetch, _Args}, Res) -> +postcondition(#state{messages = Messages, len = Len}, + {call, ?BQMOD, fetch, _Args}, Res) -> case Res of {{MsgFetched, _IsDelivered, _AckTag, _Remaining_Len}, _BQ} -> {_MsgProps, Msg} = queue:head(Messages), MsgFetched =:= Msg; {empty, _BQ} -> - queue:len(Messages) =:= 0 + Len =:= 0 end; -postcondition(#state{messages = Messages}, {call, ?BQMOD, purge, _Args}, Res) -> +postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) -> {PurgeCount, _BQ} = Res, - queue:len(Messages) =:= PurgeCount; + Len =:= PurgeCount; -postcondition(#state{messages = Messages}, +postcondition(#state{len = Len}, {call, ?BQMOD, is_empty, _Args}, Res) -> - (queue:len(Messages) =:= 0) =:= Res; + (Len =:= 0) =:= Res; postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) -> #state{confirms = Confirms} = S, {ReportedConfirmed, _BQ} = Res, lists:all(fun (M) -> lists:member(M, Confirms) end, ReportedConfirmed); -postcondition(#state{bqstate = BQ, - messages = Messages}, - {call, ?BQMOD, _Fun, _Args}, _Res) -> - ?BQMOD:len(BQ) =:= queue:len(Messages). +postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) -> + ?BQMOD:len(BQ) =:= Len. %% Helpers +repeat(Result, _Fun, 0) -> + Result; +repeat(Result, Fun, Times) -> + repeat(Fun(Result), Fun, Times - 1). + +publish_multiple(_Msg, _MsgProps, BQ, 0) -> + BQ; +publish_multiple(Msg, MsgProps, BQ, Count) -> + publish_multiple(Msg, MsgProps, + ?BQMOD:publish(Msg, MsgProps, self(), BQ), + Count - 1). + qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())). -- cgit v1.2.1 From cc3e6b3de6814b6dd4ca467da41e4131fd0b3632 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 5 Jul 2011 17:56:57 +0100 Subject: Added timeout testing --- src/rabbit_backing_queue_qc.erl | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index f018a4b8..9aaee160 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -24,6 +24,7 @@ -define(BQMOD, rabbit_variable_queue). -define(QUEUE_MAXLEN, 10000). +-define(TIMEOUT_LIMIT, 100). -define(RECORD_INDEX(Key, Record), erlang:element(2, proplists:lookup(Key, lists:zip( @@ -32,7 +33,7 @@ -export([initial_state/0, command/1, precondition/2, postcondition/3, next_state/3]). --export([prop_backing_queue_test/0, publish_multiple/4]). +-export([prop_backing_queue_test/0, publish_multiple/4, timeout/2]). -record(state, {bqstate, len, %% int @@ -90,8 +91,8 @@ command(S) -> {1, qc_drain_confirmed(S)}, {1, qc_dropwhile(S)}, {1, qc_is_empty(S)}, - {1, qc_needs_timeout(S)}, - {5, qc_purge(S)}]). + {1, qc_timeout(S)}, + {1, qc_purge(S)}]). qc_publish(#state{bqstate = BQ}) -> {call, ?BQMOD, publish, @@ -122,7 +123,7 @@ qc_requeue(#state{bqstate = BQ, acks = Acks}) -> qc_set_ram_duration_target(#state{bqstate = BQ}) -> {call, ?BQMOD, set_ram_duration_target, - [oneof([0, 1, 100, resize(1000, pos_integer()), infinity]), BQ]}. + [oneof([0, 1, 2, resize(1000, pos_integer()), infinity]), BQ]}. qc_ram_duration(#state{bqstate = BQ}) -> {call, ?BQMOD, ram_duration, [BQ]}. @@ -136,8 +137,8 @@ qc_dropwhile(#state{bqstate = BQ}) -> qc_is_empty(#state{bqstate = BQ}) -> {call, ?BQMOD, is_empty, [BQ]}. -qc_needs_timeout(#state{bqstate = BQ}) -> - {call, ?BQMOD, needs_timeout, [BQ]}. +qc_timeout(#state{bqstate = BQ}) -> + {call, ?MODULE, timeout, [BQ, ?TIMEOUT_LIMIT]}. 
qc_purge(#state{bqstate = BQ}) -> {call, ?BQMOD, purge, [BQ]}. @@ -152,6 +153,8 @@ precondition(#state{messages = Messages}, queue:is_empty(Messages); precondition(_S, {call, ?BQMOD, _Fun, _Arg}) -> true; +precondition(_S, {call, ?MODULE, timeout, _Arg}) -> + true; precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) -> Len < ?QUEUE_MAXLEN. @@ -252,8 +255,8 @@ next_state(S, BQ1, {call, ?BQMOD, dropwhile, _Args}) -> next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) -> S; -next_state(S, _Res, {call, ?BQMOD, needs_timeout, _Args}) -> - S; +next_state(S, BQ, {call, ?MODULE, timeout, _Args}) -> + S#state{bqstate = BQ}; next_state(S, Res, {call, ?BQMOD, purge, _Args}) -> BQ1 = {call, erlang, element, [2, Res]}, @@ -301,6 +304,14 @@ publish_multiple(Msg, MsgProps, BQ, Count) -> ?BQMOD:publish(Msg, MsgProps, self(), BQ), Count - 1). +timeout(BQ, 0) -> + BQ; +timeout(BQ, AtMost) -> + case rabbit_variable_queue:needs_timeout(BQ) of + false -> BQ; + _ -> timeout(rabbit_variable_queue:timeout(BQ), AtMost - 1) + end. + qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())). -- cgit v1.2.1 From 4090592bac7107e5a4480def2432b2638ce5618e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 5 Jul 2011 18:30:53 +0100 Subject: Inline rabbit_misc:table_filter, then fiddle with it until it's actually right. To say this could do with some beautification is an understatement. --- src/rabbit_binding.erl | 54 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 5873537c..21251dae 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -105,25 +105,41 @@ recover(XNames, QNames) -> (_Route, false) -> ok end, rabbit_durable_route), - rabbit_misc:table_filter( - fun (#route{binding = #binding{destination = Dst = - #resource{kind = Kind}}}) -> - sets:is_element(Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) - end, - fun (R = #route{binding = B = #binding{source = Src}}, Tx) -> - {ok, X} = rabbit_exchange:lookup(Src), - Serial = case Tx of - true -> ok = sync_transient_route( - R, fun mnesia:write/3), - transaction; - false -> rabbit_exchange:serial(X) - end, - rabbit_exchange:callback(X, add_binding, [Serial, X, B]) - end, - rabbit_semi_durable_route), + lists:foldl( + fun (R = #route{binding = B = #binding{source = Src, + destination = Dst = + #resource{kind = Kind}}}, Acc) -> + case rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:match_object( + rabbit_semi_durable_route, R, read) =/= [] + andalso sets:is_element( + Dst, case Kind of + exchange -> XNameSet; + queue -> QNameSet + end) of + false -> false; + true -> {ok, X} = rabbit_exchange:lookup(Src), + {true, rabbit_exchange:serial(X)} + end + end, + fun (false, _Tx) -> + false; + ({true, Serial0}, Tx) -> + Serial = case Tx of + true -> ok = sync_transient_route( + R, fun mnesia:write/3), + transaction; + false -> Serial0 + end, + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_exchange:callback(X, add_binding, [Serial, X, B]), + true + end) of + false -> Acc; + true -> [R | Acc] + end + end, [], rabbit_misc:dirty_read_all(rabbit_semi_durable_route)), ok. 
exists(Binding) -> -- cgit v1.2.1 From 2888edcdf93b87a205d5444c2c7d153746af9a23 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Wed, 6 Jul 2011 14:41:47 +0100 Subject: Minor refactor --- src/rabbit_backing_queue_qc.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index 9aaee160..6424976a 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -38,7 +38,7 @@ -record(state, {bqstate, len, %% int messages, %% queue of {msg_props, basic_msg} - acks, %% list of {acktag, {message_props, basic_msg}} + acks, %% list of {acktag, {msg_props, basic_msg}} confirms}). %% set of msgid %% Initialise model @@ -297,12 +297,10 @@ repeat(Result, _Fun, 0) -> repeat(Result, Fun, Times) -> repeat(Fun(Result), Fun, Times - 1). -publish_multiple(_Msg, _MsgProps, BQ, 0) -> - BQ; publish_multiple(Msg, MsgProps, BQ, Count) -> - publish_multiple(Msg, MsgProps, - ?BQMOD:publish(Msg, MsgProps, self(), BQ), - Count - 1). + repeat(BQ, fun(BQ1) -> + ?BQMOD:publish(Msg, MsgProps, self(), BQ1) + end, Count). timeout(BQ, 0) -> BQ; -- cgit v1.2.1 From 365b62015bc138195fd389919e6572b55d786b6a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Jul 2011 15:46:21 +0100 Subject: Refactor / beautification. --- src/rabbit_binding.erl | 63 +++++++++++++++++++++----------------------------- 1 file changed, 26 insertions(+), 37 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 21251dae..11887944 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -94,8 +94,6 @@ routing_key, arguments]). recover(XNames, QNames) -> - XNameSet = sets:from_list(XNames), - QNameSet = sets:from_list(QNames), rabbit_misc:table_filter( fun (Route) -> mnesia:read({rabbit_semi_durable_route, Route}) =:= [] @@ -105,43 +103,34 @@ recover(XNames, QNames) -> (_Route, false) -> ok end, rabbit_durable_route), - lists:foldl( - fun (R = #route{binding = B = #binding{source = Src, - destination = Dst = - #resource{kind = Kind}}}, Acc) -> - case rabbit_misc:execute_mnesia_transaction( - fun () -> - case mnesia:match_object( - rabbit_semi_durable_route, R, read) =/= [] - andalso sets:is_element( - Dst, case Kind of - exchange -> XNameSet; - queue -> QNameSet - end) of - false -> false; - true -> {ok, X} = rabbit_exchange:lookup(Src), - {true, rabbit_exchange:serial(X)} - end - end, - fun (false, _Tx) -> - false; - ({true, Serial0}, Tx) -> - Serial = case Tx of - true -> ok = sync_transient_route( - R, fun mnesia:write/3), - transaction; - false -> Serial0 - end, - {ok, X} = rabbit_exchange:lookup(Src), - rabbit_exchange:callback(X, add_binding, [Serial, X, B]), - true - end) of - false -> Acc; - true -> [R | Acc] - end - end, [], rabbit_misc:dirty_read_all(rabbit_semi_durable_route)), + XNameSet = sets:from_list(XNames), + QNameSet = sets:from_list(QNames), + [recover_semi_durable_route(R, set(destination(R), XNameSet, QNameSet)) || + R <- rabbit_misc:dirty_read_all(rabbit_semi_durable_route)], ok. 
+recover_semi_durable_route(R = #route{binding = B}, ToRecover) -> + #binding{source = Src, destination = Dst} = B, + {ok, X} = rabbit_exchange:lookup(Src), + rabbit_misc:execute_mnesia_transaction( + fun () -> + case mnesia:match_object( + rabbit_semi_durable_route, R, read) =/= [] andalso + sets:is_element(Dst, ToRecover) of + false -> no_recover; + true -> ok = sync_transient_route(R, fun mnesia:write/3), + rabbit_exchange:serial(X) + end + end, + fun (no_recover, _) -> ok; + (_Serial, true) -> x_callback(transaction, X, add_binding, B); + (Serial, false) -> x_callback(Serial, X, add_binding, B) + end). + +destination(#route{binding = #binding{destination = D}}) -> D. +set(#resource{kind = exchange}, XNameSet, _) -> XNameSet; +set(#resource{kind = queue}, _, QNameSet) -> QNameSet. + exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From fc88a5e37bcd0eb4c805d0b74a0b23cb1d3f89dd Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 6 Jul 2011 16:00:48 +0100 Subject: One last tweak. --- src/rabbit_binding.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index 11887944..b88e6977 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -114,9 +114,8 @@ recover_semi_durable_route(R = #route{binding = B}, ToRecover) -> {ok, X} = rabbit_exchange:lookup(Src), rabbit_misc:execute_mnesia_transaction( fun () -> - case mnesia:match_object( - rabbit_semi_durable_route, R, read) =/= [] andalso - sets:is_element(Dst, ToRecover) of + Rs = mnesia:match_object(rabbit_semi_durable_route, R, read), + case Rs =/= [] andalso sets:is_element(Dst, ToRecover) of false -> no_recover; true -> ok = sync_transient_route(R, fun mnesia:write/3), rabbit_exchange:serial(X) -- cgit v1.2.1 From 8f8faabe84201fb68c4644b37ac7de498f3f0d39 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 6 Jul 2011 17:36:22 +0100 Subject: make 'tx.commit-ok' indicate responsibility transfer This is now very close to the previous tx semantics. The downsides w.r.t. the previous state of this branch are - a bit more code, though only in the channel - tx and confirm mode no longer compose - tx always carries the cost of confirms Implementation notes: - The channel must remain active while a commit waits for confirms from queues to trickle in. We achieve this by recording the fact that we have a pending commit. - The trigger for sending the commit-ok is that the number of pending confirms drops to zero and we have a pending commit. - We check for that condition in three places: a) at the point of commit (in case the tx contains no publishes or all confirmations happen as part of delivering the messages to queues) b) where we would normally send basic.acks c) where we would normally send basic.nacks - we are re-using the same logic/state as for 'proper' confirms, except we suppress the sending of acks/nacks - handling the failure case is slightly awkward. We record a tx as 'failed' as soon as we encounter the 'nack' case. Subsequently, when the aforementioned triggering condition is met instead of sending a tx.commit-ok we send a precondition_failed error. But we can't just employ rabbit_misc:protocol_error here since that only works in the context of handling an AMQP method, which is only the case for (a). So instead we drop to a slightly lower API level and rely on the fact precondition_failed is a channel-level, rather than connection-level error. 
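For illustration only (not part of this patch), a minimal sketch of what the new semantics look like from a client's point of view, assuming the RabbitMQ Erlang client (amqp_client); `Channel' is an already-open channel pid and <<"q">> an existing durable queue reachable via the default exchange — the module name, queue name and payload are placeholders:

    %% Sketch only: assumes the rabbitmq-erlang-client is available,
    %% `Channel' is an open channel pid and <<"q">> a durable queue.
    -module(tx_commit_example).
    -include_lib("amqp_client/include/amqp_client.hrl").
    -export([publish_in_tx/1]).

    publish_in_tx(Channel) ->
        %% Switch the channel into transactional mode.
        #'tx.select_ok'{} = amqp_channel:call(Channel, #'tx.select'{}),
        Publish = #'basic.publish'{exchange = <<>>, routing_key = <<"q">>},
        Msg = #amqp_msg{props = #'P_basic'{delivery_mode = 2},
                        payload = <<"hello">>},
        ok = amqp_channel:cast(Channel, Publish, Msg),
        %% With this change tx.commit-ok is withheld until every queue the
        %% message was routed to has confirmed it, so the synchronous call
        %% below only returns once the broker has taken responsibility
        %% for the published message.
        #'tx.commit_ok'{} = amqp_channel:call(Channel, #'tx.commit'{}).

In other words, "responsibility transfer" shows up to clients as the blocking behaviour of that final call: commit-ok now doubles as the broker's acknowledgement that the messages published in the transaction are safe.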
--- src/rabbit_channel.erl | 116 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 76 insertions(+), 40 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 81a0ee80..0c211b46 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -30,7 +30,7 @@ prioritise_cast/2]). -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, - limiter_pid, start_limiter_fun, tx_enabled, next_tag, + limiter_pid, start_limiter_fun, tx_status, next_tag, unacked_message_q, uncommitted_message_q, uncommitted_ack_q, user, virtual_host, most_recently_declared_queue, consumer_mapping, blocking, consumer_monitors, queue_collector_pid, @@ -174,7 +174,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, conn_pid = ConnPid, limiter_pid = undefined, start_limiter_fun = StartLimiterFun, - tx_enabled = false, + tx_status = none, next_tag = 1, unacked_message_q = queue:new(), uncommitted_message_q = queue:new(), @@ -516,10 +516,6 @@ queue_blocked(QPid, State = #ch{blocking = Blocking}) -> State#ch{blocking = Blocking1} end. -blind_confirm({#delivery{message = #basic_message{exchange_name = XName}, - msg_seq_no = MsgSeqNo}, _QNames}, State) -> - record_confirm(MsgSeqNo, XName, State). - record_confirm(undefined, _, State) -> State; record_confirm(MsgSeqNo, XName, State) -> @@ -598,6 +594,15 @@ handle_method(#'channel.close'{}, _, State = #ch{reader_pid = ReaderPid}) -> ReaderPid ! {channel_closing, self()}, {noreply, State1}; +%% Even though the spec prohibits the client from sending commands +%% while waiting for the reply to a synchronous command, we generally +%% do allow this...except in the case of a pending tx.commit, where +%% it could wreak havoc. +handle_method(_Method, _, #ch{tx_status = TxStatus}) + when TxStatus =/= none andalso TxStatus =/= in_progress -> + rabbit_misc:protocol_error( + channel_error, "unexpected command while processing 'tx.commit'", []); + handle_method(#'access.request'{},_, State) -> {reply, #'access.request_ok'{ticket = 1}, State}; @@ -606,7 +611,7 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, mandatory = Mandatory, immediate = Immediate}, Content, State = #ch{virtual_host = VHostPath, - tx_enabled = TxEnabled, + tx_status = TxStatus, confirm_enabled = ConfirmEnabled, trace_state = TraceState}) -> ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin), @@ -618,10 +623,10 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, DecodedContent = rabbit_binary_parser:ensure_content_decoded(Content), check_user_id_header(DecodedContent#content.properties, State), {MsgSeqNo, State1} = - case ConfirmEnabled of - false -> {undefined, State}; - true -> SeqNo = State#ch.publish_seqno, - {SeqNo, State#ch{publish_seqno = SeqNo + 1}} + case {TxStatus, ConfirmEnabled} of + {none, false} -> {undefined, State}; + {_, _} -> SeqNo = State#ch.publish_seqno, + {SeqNo, State#ch{publish_seqno = SeqNo + 1}} end, case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of {ok, Message} -> @@ -629,13 +634,13 @@ handle_method(#'basic.publish'{exchange = ExchangeNameBin, Delivery = rabbit_basic:delivery(Mandatory, Immediate, Message, MsgSeqNo), QNames = rabbit_exchange:route(Exchange, Delivery), - State2 = case TxEnabled of - true -> TMQ = State1#ch.uncommitted_message_q, - NewTMQ = queue:in({Delivery, QNames}, TMQ), - State1#ch{uncommitted_message_q = NewTMQ}; - false -> deliver_to_queues({Delivery, QNames}, State1) - end, - {noreply, State2}; + {noreply, + case TxStatus of + none 
-> deliver_to_queues({Delivery, QNames}, State1); + in_progress -> TMQ = State1#ch.uncommitted_message_q, + NewTMQ = queue:in({Delivery, QNames}, TMQ), + State1#ch{uncommitted_message_q = NewTMQ} + end}; {error, Reason} -> rabbit_misc:protocol_error(precondition_failed, "invalid message: ~p", [Reason]) @@ -650,15 +655,15 @@ handle_method(#'basic.nack'{delivery_tag = DeliveryTag, handle_method(#'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple}, _, State = #ch{unacked_message_q = UAMQ, - tx_enabled = TxEnabled}) -> + tx_status = TxStatus}) -> {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple), State1 = State#ch{unacked_message_q = Remaining}, - {noreply, case TxEnabled of - true -> NewTAQ = queue:join(State1#ch.uncommitted_ack_q, - Acked), - State1#ch{uncommitted_ack_q = NewTAQ}; - false -> ack(Acked, State1) - end}; + {noreply, + case TxStatus of + none -> ack(Acked, State1); + in_progress -> NewTAQ = queue:join(State1#ch.uncommitted_ack_q, Acked), + State1#ch{uncommitted_ack_q = NewTAQ} + end}; handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck}, @@ -1039,28 +1044,35 @@ handle_method(#'queue.purge'{queue = QueueNameBin, return_ok(State, NoWait, #'queue.purge_ok'{message_count = PurgedMessageCount}); +handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) -> + rabbit_misc:protocol_error( + precondition_failed, "cannot switch from confirm to tx mode", []); + handle_method(#'tx.select'{}, _, State) -> - {reply, #'tx.select_ok'{}, State#ch{tx_enabled = true}}; + {reply, #'tx.select_ok'{}, State#ch{tx_status = in_progress}}; -handle_method(#'tx.commit'{}, _, #ch{tx_enabled = false}) -> +handle_method(#'tx.commit'{}, _, #ch{tx_status = none}) -> rabbit_misc:protocol_error( precondition_failed, "channel is not transactional", []); handle_method(#'tx.commit'{}, _, State = #ch{uncommitted_message_q = TMQ, uncommitted_ack_q = TAQ}) -> - State1 = rabbit_misc:queue_fold(fun deliver_to_queues/2, State, TMQ), - {reply, #'tx.commit_ok'{}, new_tx(ack(TAQ, State1))}; + State1 = new_tx(ack(TAQ, rabbit_misc:queue_fold(fun deliver_to_queues/2, + State, TMQ))), + {noreply, maybe_complete_tx(State1#ch{tx_status = committing})}; -handle_method(#'tx.rollback'{}, _, #ch{tx_enabled = false}) -> +handle_method(#'tx.rollback'{}, _, #ch{tx_status = none}) -> rabbit_misc:protocol_error( precondition_failed, "channel is not transactional", []); handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ, - uncommitted_message_q = TMQ, uncommitted_ack_q = TAQ}) -> - State1 = rabbit_misc:queue_fold(fun blind_confirm/2, State, TMQ), - {reply, #'tx.rollback_ok'{}, new_tx(State1#ch{unacked_message_q = - queue:join(TAQ, UAMQ)})}; + {reply, #'tx.rollback_ok'{}, new_tx(State#ch{unacked_message_q = + queue:join(TAQ, UAMQ)})}; + +handle_method(#'confirm.select'{}, _, #ch{tx_status = in_progress}) -> + rabbit_misc:protocol_error( + precondition_failed, "cannot switch from tx to confirm mode", []); handle_method(#'confirm.select'{nowait = NoWait}, _, State) -> return_ok(State#ch{confirm_enabled = true}, @@ -1126,7 +1138,7 @@ handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> State1 = State#ch{unconfirmed_qm = gb_trees:delete_any(QPid, UQM)}, {Nack, SendFun} = case Reason of normal -> {false, fun record_confirms/2}; - _ -> {true, fun send_nacks/2} + _ -> {true, fun send_nacks/2} end, {MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1), erase_queue_stats(QPid), @@ -1348,20 +1360,25 @@ lock_message(false, _MsgStruct, State) -> 
send_nacks([], State) -> State; -send_nacks(MXs, State) -> +send_nacks(MXs, State = #ch{tx_status = none}) -> MsgSeqNos = [ MsgSeqNo || {MsgSeqNo, _} <- MXs ], coalesce_and_send(MsgSeqNos, fun(MsgSeqNo, Multiple) -> #'basic.nack'{delivery_tag = MsgSeqNo, multiple = Multiple} - end, State). + end, State); +send_nacks(_, State) -> + maybe_complete_tx(State#ch{tx_status = failed}). -send_confirms(State = #ch{confirmed = C}) -> +send_confirms(State = #ch{tx_status = none, confirmed = C}) -> C1 = lists:append(C), MsgSeqNos = [ begin maybe_incr_stats([{ExchangeName, 1}], confirm, State), MsgSeqNo end || {MsgSeqNo, ExchangeName} <- C1 ], - send_confirms(MsgSeqNos, State #ch{confirmed = []}). + send_confirms(MsgSeqNos, State #ch{confirmed = []}); +send_confirms(State) -> + maybe_complete_tx(State). + send_confirms([], State) -> State; send_confirms([MsgSeqNo], State = #ch{writer_pid = WriterPid}) -> @@ -1391,6 +1408,25 @@ coalesce_and_send(MsgSeqNos, MkMsgFun, WriterPid, MkMsgFun(SeqNo, false)) || SeqNo <- Ss], State. +maybe_complete_tx(State = #ch{tx_status = in_progress}) -> + State; +maybe_complete_tx(State = #ch{unconfirmed_mq = UMQ}) -> + case gb_trees:is_empty(UMQ) of + false -> State; + true -> complete_tx(State#ch{confirmed = []}) + end. + +complete_tx(State = #ch{tx_status = committing}) -> + ok = rabbit_writer:send_command(State#ch.writer_pid, #'tx.commit_ok'{}), + State#ch{tx_status = in_progress}; +complete_tx(State = #ch{tx_status = failed}) -> + {noreply, State1} = send_exception( + rabbit_misc:amqp_error( + precondition_failed, "partial tx completion", [], + 'tx.commit'), + State), + State1#ch{tx_status = in_progress}. + infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items]. i(pid, _) -> self(); @@ -1398,7 +1434,7 @@ i(connection, #ch{conn_pid = ConnPid}) -> ConnPid; i(number, #ch{channel = Channel}) -> Channel; i(user, #ch{user = User}) -> User#user.username; i(vhost, #ch{virtual_host = VHost}) -> VHost; -i(transactional, #ch{tx_enabled = TE}) -> TE; +i(transactional, #ch{tx_status = TE}) -> TE =/= none; i(confirm, #ch{confirm_enabled = CE}) -> CE; i(consumer_count, #ch{consumer_mapping = ConsumerMapping}) -> dict:size(ConsumerMapping); -- cgit v1.2.1 From 8f136d8ead5b9a9ce9bea51eb1535dee065227ad Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Wed, 6 Jul 2011 17:40:37 +0100 Subject: ditch per-x-q confirm stats They don't work properly (see comment in code) and are emitted in a very awkward place. And they expose implementation detail - there is no a priori reason why queues should be involved in confirms - and are of questionable utility. --- src/rabbit_channel.erl | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 0c211b46..84728980 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -539,17 +539,13 @@ process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ, fun(MsgSeqNo, {_MXs, UMQ0, _UQM} = Acc) -> case gb_trees:lookup(MsgSeqNo, UMQ0) of {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ, - Acc, Nack, State); + Acc, Nack); none -> Acc end end, {[], UMQ, UQM}, MsgSeqNos), {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}. 
-remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack, - State) -> - %% these confirms will be emitted even when a queue dies, but that - %% should be fine, since the queue stats get erased immediately - maybe_incr_stats([{{QPid, XName}, 1}], confirm, State), +remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack) -> UQM1 = case gb_trees:lookup(QPid, UQM) of {value, MsgSeqNos} -> MsgSeqNos1 = gb_sets:delete(MsgSeqNo, MsgSeqNos), -- cgit v1.2.1 From 64e2dc821b174af1f324bb7ae634b0d30d9214e0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Jul 2011 11:58:20 +0100 Subject: Fix upgrades from 2.4.1 and before. This failed because the #exchange{} record now contains the exchange scratch space, but at the point at which we add trace exchanges Mnesia hasn't got there yet. The moral of this story is: never use record definitions in rabbit_upgrade_functions (which is why all the transform() invocations don't). --- src/rabbit_upgrade_functions.erl | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index acf45bf3..d8c53c7a 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -16,8 +16,6 @@ -module(rabbit_upgrade_functions). --include("rabbit.hrl"). - -compile([export_all]). -rabbit_upgrade({remove_user_scope, mnesia, []}). @@ -190,11 +188,7 @@ create(Tab, TabDef) -> %% the exchange type registry or worker pool to be running by dint of %% not validating anything and assuming the exchange type does not %% require serialisation. +%% NB: this assumes the pre-exchange-scratch-space format declare_exchange(XName, Type) -> - X = #exchange{name = XName, - type = Type, - durable = true, - auto_delete = false, - internal = false, - arguments = []}, + X = {exchange, XName, Type, true, false, false, []}, ok = mnesia:dirty_write(rabbit_durable_exchange, X). -- cgit v1.2.1 From 213923a4e3ddc67d5649850d967074f33ccf0aeb Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 7 Jul 2011 12:02:51 +0100 Subject: Note to future self / others --- src/rabbit_upgrade_functions.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl index d8c53c7a..8d26866b 100644 --- a/src/rabbit_upgrade_functions.erl +++ b/src/rabbit_upgrade_functions.erl @@ -16,6 +16,9 @@ -module(rabbit_upgrade_functions). +%% If you are tempted to add include("rabbit.hrl"). here, don't. Using record +%% defs here leads to pain later. + -compile([export_all]). -rabbit_upgrade({remove_user_scope, mnesia, []}). 
-- cgit v1.2.1 From 27a4d83849a745a6eb6432f1b7bae9c2299fefca Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 7 Jul 2011 13:28:32 +0100 Subject: inlining --- src/rabbit_binding.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl index b88e6977..205d5bba 100644 --- a/src/rabbit_binding.erl +++ b/src/rabbit_binding.erl @@ -105,8 +105,12 @@ recover(XNames, QNames) -> end, rabbit_durable_route), XNameSet = sets:from_list(XNames), QNameSet = sets:from_list(QNames), - [recover_semi_durable_route(R, set(destination(R), XNameSet, QNameSet)) || - R <- rabbit_misc:dirty_read_all(rabbit_semi_durable_route)], + SelectSet = fun (#resource{kind = exchange}) -> XNameSet; + (#resource{kind = queue}) -> QNameSet + end, + [recover_semi_durable_route(R, SelectSet(Dst)) || + R = #route{binding = #binding{destination = Dst}} <- + rabbit_misc:dirty_read_all(rabbit_semi_durable_route)], ok. recover_semi_durable_route(R = #route{binding = B}, ToRecover) -> @@ -126,10 +130,6 @@ recover_semi_durable_route(R = #route{binding = B}, ToRecover) -> (Serial, false) -> x_callback(Serial, X, add_binding, B) end). -destination(#route{binding = #binding{destination = D}}) -> D. -set(#resource{kind = exchange}, XNameSet, _) -> XNameSet; -set(#resource{kind = queue}, _, QNameSet) -> QNameSet. - exists(Binding) -> binding_action( Binding, fun (_Src, _Dst, B) -> -- cgit v1.2.1 From 4d84dec33516e7a1b028d476c394047f7bbf7e57 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Fri, 8 Jul 2011 12:35:58 +0100 Subject: cosmetic --- docs/rabbitmqctl.1.xml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml index 71764522..25cc698f 100644 --- a/docs/rabbitmqctl.1.xml +++ b/docs/rabbitmqctl.1.xml @@ -1238,9 +1238,8 @@ - If no channelinfoitems are specified - then pid, user, consumer_count, and - messages_unacknowledged are assumed. + If no channelinfoitems are specified then pid, + user, consumer_count, and messages_unacknowledged are assumed. -- cgit v1.2.1 From 9877096a1cf62423fcbe66bab8f1e74980cc96ca Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 14 Jul 2011 12:51:50 +0100 Subject: cosmetic --- src/rabbit_reader.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index f5214a77..dffabf85 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -677,7 +677,6 @@ handle_method0(#'connection.tune_ok'{frame_max = FrameMax, end; handle_method0(#'connection.open'{virtual_host = VHostPath}, - State = #v1{connection_state = opening, connection = Connection = #connection{ user = User, -- cgit v1.2.1 From 3ceca58444a08af7a5db89896aa177b48573d85d Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 14 Jul 2011 13:29:29 +0100 Subject: fix bug introduced by bug 24216 (tx untangling) forgot to propagate an API change --- src/rabbit_mirror_queue_master.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 41748c15..69be6ecd 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -298,7 +298,7 @@ is_duplicate(Message = #basic_message { id = MsgId }, error -> %% We permit the underlying BQ to have a peek at it, but %% only if we ourselves are not filtering out the msg. 
- {Result, BQS1} = BQ:is_duplicate(none, Message, BQS), + {Result, BQS1} = BQ:is_duplicate(Message, BQS), {Result, State #state { backing_queue_state = BQS1 }}; {ok, published} -> %% It already got published when we were a slave and no -- cgit v1.2.1 From 8dc1f8ea8de5155a08304e60532b01a7bf48506e Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 15 Jul 2011 14:12:00 +0100 Subject: re-add schema-less nodes --- src/rabbit_mnesia.erl | 54 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8d5c8646..52103a09 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -429,6 +429,8 @@ delete_previously_running_nodes() -> init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], + IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), + WasDiskNode = mnesia:system_info(use_dir), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> case Force of @@ -442,29 +444,38 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> end; true -> ok end, - case {Nodes, mnesia:system_info(use_dir)} of - {[], false} -> + case {Nodes, WasDiskNode, IsDiskNode} of + {[], false, false} -> + ok = create_tables(false), + ok = rabbit_version:record_desired(); + {[], true, false} -> + rabbit_log:warning("converting from disc to ram node; backing up database"), + move_db(), + ok = create_tables(false), + ok = rabbit_version:record_desired(); + {[], false, _} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[], true} -> + {[], _, true} -> %% We're the first node up case rabbit_upgrade:maybe_upgrade_local() of - ok -> ensure_schema_integrity(); - version_not_available -> ok = schema_ok_or_move() + ok -> + ensure_schema_integrity(); + version_not_available -> + ok = schema_ok_or_move() end, ok; - {[AnotherNode|_], _} -> + {[AnotherNode|_], _, _} -> %% Subsequent node in cluster, catch up ensure_version_ok( rpc:call(AnotherNode, rabbit_version, recorded, [])), - IsDiskNode = ClusterNodes == [] orelse - lists:member(node(), ClusterNodes), ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, disc_copies), - ok = create_local_table_copies(case IsDiskNode of - true -> disc; - false -> ram - end), + CopyType = case IsDiskNode of + true -> disc; + false -> ram + end, + ok = create_local_table_copy(schema, CopyType), + ok = create_local_table_copies(CopyType), ok = SecondaryPostMnesiaFun(), ensure_schema_integrity(), ok @@ -486,7 +497,7 @@ maybe_upgrade_local_or_record_desired() -> schema_ok_or_move() -> case check_schema_integrity() of - ok -> + {true, ok} -> ok; {error, Reason} -> %% NB: we cannot use rabbit_log here since it may not have been @@ -545,13 +556,24 @@ copy_db(Destination) -> rabbit_misc:recursive_copy(dir(), Destination). create_tables() -> + create_tables(true). 
+ +create_tables(IsDiskNode) -> lists:foreach(fun ({Tab, TabDef}) -> TabDef1 = proplists:delete(match, TabDef), - case mnesia:create_table(Tab, TabDef1) of + TabDef2 = case IsDiskNode of + true -> + TabDef1; + false -> + [{disc_copies, []} | + proplists:delete(disc_copies, + TabDef1)] + end, + case mnesia:create_table(Tab, TabDef2) of {atomic, ok} -> ok; {aborted, Reason} -> throw({error, {table_creation_failed, - Tab, TabDef1, Reason}}) + Tab, TabDef2, Reason}}) end end, table_definitions()), -- cgit v1.2.1 From 474c6763d5a5fea241642c2e27bcaeceda4cab68 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 15 Jul 2011 16:40:36 +0100 Subject: cleanup queue and restart app --- src/rabbit_tests.erl | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 63676fef..2542905f 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -60,6 +60,9 @@ all_tests() -> passed = test_confirms(), passed = maybe_run_cluster_dependent_tests(), passed = test_configurable_server_properties(), + ok = cleanup_test_queue(), + ok = restart_app(), + io:format("rabbit app restarted"), passed. maybe_run_cluster_dependent_tests() -> @@ -1911,6 +1914,16 @@ with_empty_test_queue(Fun) -> {0, Qi} = init_test_queue(), rabbit_queue_index:delete_and_terminate(Fun(Qi)). +cleanup_test_queue() -> + %% the test queue's already there; let's remove it + {_, Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), + {ok, _} = rabbit_amqqueue:delete(Q, false, false), + ok. + +restart_app() -> + rabbit:stop(), + rabbit:start(). + queue_index_publish(SeqIds, Persistent, Qi) -> Ref = rabbit_guid:guid(), MsgStore = case Persistent of -- cgit v1.2.1 From 1f45c319d6c1bd42ea0a1cf0eb91335c40430fdb Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 15 Jul 2011 16:42:57 +0100 Subject: remove io:format --- src/rabbit_tests.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 2542905f..615a04c7 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -62,7 +62,6 @@ all_tests() -> passed = test_configurable_server_properties(), ok = cleanup_test_queue(), ok = restart_app(), - io:format("rabbit app restarted"), passed. maybe_run_cluster_dependent_tests() -> -- cgit v1.2.1 From 252f1a5efeda64ca4ccd3ae48c2cad31739357b4 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 15 Jul 2011 17:27:27 +0100 Subject: Correct boilerplate for the HA modules which were started in 2010 but failed to get updated to 2011. --- src/rabbit_mirror_queue_coordinator.erl | 2 +- src/rabbit_mirror_queue_master.erl | 2 +- src/rabbit_mirror_queue_misc.erl | 2 +- src/rabbit_mirror_queue_slave.erl | 2 +- src/rabbit_mirror_queue_slave_sup.erl | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl index a347904c..f6664a27 100644 --- a/src/rabbit_mirror_queue_coordinator.erl +++ b/src/rabbit_mirror_queue_coordinator.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2010-2011 VMware, Inc. All rights reserved. %% -module(rabbit_mirror_queue_coordinator). 
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl index 69be6ecd..532911f2 100644 --- a/src/rabbit_mirror_queue_master.erl +++ b/src/rabbit_mirror_queue_master.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2010-2011 VMware, Inc. All rights reserved. %% -module(rabbit_mirror_queue_master). diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl index 4761f79e..6a9f733e 100644 --- a/src/rabbit_mirror_queue_misc.erl +++ b/src/rabbit_mirror_queue_misc.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2010-2011 VMware, Inc. All rights reserved. %% -module(rabbit_mirror_queue_misc). diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 93340ba8..b38a8967 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2010-2011 VMware, Inc. All rights reserved. %% -module(rabbit_mirror_queue_slave). diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 2ce5941e..879a6017 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -11,7 +11,7 @@ %% The Original Code is RabbitMQ. %% %% The Initial Developer of the Original Code is VMware, Inc. -%% Copyright (c) 2007-2010 VMware, Inc. All rights reserved. +%% Copyright (c) 2010-2011 VMware, Inc. All rights reserved. %% -module(rabbit_mirror_queue_slave_sup). -- cgit v1.2.1 From fb0af1dafee8e1ef0e3abcbb3e54a378b3b9f3fd Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 15 Jul 2011 17:29:51 +0100 Subject: increase a timeout --- src/rabbit_tests.erl | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 615a04c7..56c7f2f5 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -60,7 +60,6 @@ all_tests() -> passed = test_confirms(), passed = maybe_run_cluster_dependent_tests(), passed = test_configurable_server_properties(), - ok = cleanup_test_queue(), ok = restart_app(), passed. @@ -1913,12 +1912,6 @@ with_empty_test_queue(Fun) -> {0, Qi} = init_test_queue(), rabbit_queue_index:delete_and_terminate(Fun(Qi)). -cleanup_test_queue() -> - %% the test queue's already there; let's remove it - {_, Q} = rabbit_amqqueue:declare(test_queue(), true, false, [], none), - {ok, _} = rabbit_amqqueue:delete(Q, false, false), - ok. - restart_app() -> rabbit:stop(), rabbit:start(). @@ -2162,7 +2155,7 @@ wait_for_confirms(Unconfirmed) -> wait_for_confirms( gb_sets:difference(Unconfirmed, gb_sets:from_list(Confirmed))) - after 1000 -> exit(timeout_waiting_for_confirm) + after 5000 -> exit(timeout_waiting_for_confirm) end end. 
-- cgit v1.2.1 From 8f910d23934c24f01e4199008727ee6c2cf2cb1d Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Fri, 15 Jul 2011 18:03:46 +0100 Subject: Stronger constraints and some notes on frequency --- src/rabbit_backing_queue_qc.erl | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index d73901da..d616979a 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -27,8 +27,8 @@ -define(TIMEOUT_LIMIT, 100). -define(RECORD_INDEX(Key, Record), - erlang:element(2, proplists:lookup(Key, lists:zip( - record_info(fields, Record), lists:seq(2, record_info(size, Record)))))). + proplists:get_value(Key, lists:zip( + record_info(fields, Record), lists:seq(2, record_info(size, Record))))). -export([initial_state/0, command/1, precondition/2, postcondition/3, next_state/3]). @@ -79,11 +79,17 @@ prop_backing_queue_test() -> %% Commands +%% Command frequencies are tuned so that queues are normally reasonably +%% short, but they may sometimes exceed ?QUEUE_MAXLEN. Publish-multiple +%% and purging cause extreme queue lengths, so these have lower probabilities. +%% Fetches are sufficiently frequent so that commands that need acktags +%% get decent coverage. + command(S) -> frequency([{10, qc_publish(S)}, {1, qc_publish_delivered(S)}, - {1, qc_publish_multiple(S)}, - {15, qc_fetch(S)}, + {1, qc_publish_multiple(S)}, %% very slow + {15, qc_fetch(S)}, %% needed for ack and requeue {15, qc_ack(S)}, {15, qc_requeue(S)}, {3, qc_set_ram_duration_target(S)}, @@ -264,16 +270,24 @@ next_state(S, Res, {call, ?BQMOD, purge, _Args}) -> %% Postconditions -postcondition(#state{messages = Messages, len = Len}, - {call, ?BQMOD, fetch, _Args}, Res) -> +postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) -> + #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S, case Res of - {{MsgFetched, _IsDelivered, _AckTag, _Remaining_Len}, _BQ} -> + {{MsgFetched, _IsDelivered, AckTag, RemainingLen}, _BQ} -> {_MsgProps, Msg} = queue:head(Messages), - MsgFetched =:= Msg; + MsgFetched =:= Msg andalso + not lists:member(AckTag, Acks) andalso + not gb_sets:is_element(AckTag, Confrms) andalso + RemainingLen =:= Len - 1; {empty, _BQ} -> Len =:= 0 end; +postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) -> + #state{acks = Acks, confirms = Confrms} = S, + not lists:member(AckTag, Acks) andalso + not gb_sets:is_element(AckTag, Confrms); + postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) -> {PurgeCount, _BQ} = Res, Len =:= PurgeCount; -- cgit v1.2.1 From a5a02e720d5a4c1acaa5d6b7d7877b2709633add Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Mon, 18 Jul 2011 11:16:13 +0100 Subject: the argument is sometimes 'disc_copies' and not 'disc' --- src/rabbit_mnesia.erl | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 52103a09..2db598c9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -444,6 +444,8 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> end; true -> ok end, + %% We create a new db (on disk, or in ram) in the first + %% three cases and attempt to upgrade the in the other two case {Nodes, WasDiskNode, IsDiskNode} of {[], false, false} -> ok = create_tables(false), @@ -453,28 +455,28 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> move_db(), ok = create_tables(false), ok = 
rabbit_version:record_desired(); - {[], false, _} -> + {[], false, true} -> %% Nothing there at all, start from scratch ok = create_schema(); - {[], _, true} -> + {[], true, true} -> %% We're the first node up - case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ensure_schema_integrity(); - version_not_available -> - ok = schema_ok_or_move() - end, - ok; + ok = case rabbit_upgrade:maybe_upgrade_local() of + ok -> + ensure_schema_integrity(); + version_not_available -> + schema_ok_or_move() + end; {[AnotherNode|_], _, _} -> %% Subsequent node in cluster, catch up ensure_version_ok( rpc:call(AnotherNode, rabbit_version, recorded, [])), ok = wait_for_replicated_tables(), - CopyType = case IsDiskNode of - true -> disc; - false -> ram - end, - ok = create_local_table_copy(schema, CopyType), + {CopyType, CopyTypeAlt} = + case IsDiskNode of + true -> {disc, disc_copies}; + false -> {ram, ram_copies} + end, + ok = create_local_table_copy(schema, CopyTypeAlt), ok = create_local_table_copies(CopyType), ok = SecondaryPostMnesiaFun(), ensure_schema_integrity(), -- cgit v1.2.1 From be9fb94cf4cbd6415a7d137247305f808cccd3f0 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Mon, 18 Jul 2011 14:08:35 +0100 Subject: cover the missing case --- src/rabbit_mnesia.erl | 13 +++++++------ src/rabbit_tests.erl | 4 +++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2db598c9..b38b43c4 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -444,15 +444,15 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> end; true -> ok end, + if (WasDiskNode andalso (not IsDiskNode)) -> + rabbit_log:warning("converting from disc to ram node; backing up database"), + move_db(); + true -> ok + end, %% We create a new db (on disk, or in ram) in the first %% three cases and attempt to upgrade the in the other two case {Nodes, WasDiskNode, IsDiskNode} of - {[], false, false} -> - ok = create_tables(false), - ok = rabbit_version:record_desired(); - {[], true, false} -> - rabbit_log:warning("converting from disc to ram node; backing up database"), - move_db(), + {_, _, false} -> ok = create_tables(false), ok = rabbit_version:record_desired(); {[], false, true} -> @@ -573,6 +573,7 @@ create_tables(IsDiskNode) -> end, case mnesia:create_table(Tab, TabDef2) of {atomic, ok} -> ok; + {aborted, {already_exists, Tab}} -> ok; {aborted, Reason} -> throw({error, {table_creation_failed, Tab, TabDef2, Reason}}) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 63676fef..51185879 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -904,7 +904,6 @@ test_option_parser() -> passed. test_cluster_management() -> - %% 'cluster' and 'reset' should only work if the app is stopped {error, _} = control_action(cluster, []), {error, _} = control_action(reset, []), @@ -1014,6 +1013,9 @@ test_cluster_management2(SecondaryNode) -> "invalid2@invalid"]), %% turn a disk node into a ram node + receive + after 1000 -> ok + end, ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), ok = control_action(start_app, []), -- cgit v1.2.1 From efb66a7e5fe0b0fdd29cb6232e9eac5cb9918504 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Mon, 18 Jul 2011 14:09:24 +0100 Subject: appending to a list is slow. 
And we don't seem to care about this list being ordered --- src/rabbit_backing_queue_qc.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index d616979a..a870ddd5 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -204,11 +204,11 @@ next_state(S, Res, true -> gb_sets:add(MsgId, Confirms); _ -> Confirms end, - acks = Acks ++ case AckReq of - true -> [{AckTag, {MsgProps, Msg}}]; - false -> [] - end - }; + acks = case AckReq of + true -> [{AckTag, {MsgProps, Msg}} | Acks]; + false -> Acks + end + }; next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> #state{len = Len, messages = Messages, acks = Acks} = S, @@ -222,7 +222,7 @@ next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> {{value, MsgProp_Msg}, M2} -> S2 = S1#state{len = Len - 1, messages = M2}, case AckReq of - true -> S2#state{acks = Acks ++ [{AckTag, MsgProp_Msg}]}; + true -> S2#state{acks = [{AckTag, MsgProp_Msg} | Acks]}; false -> S2 end end; -- cgit v1.2.1 From 1e67cf6b1c5292e0c553983ff6f936724b93359d Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Mon, 18 Jul 2011 16:04:13 +0100 Subject: there are many ways for a queue to die nicely --- src/rabbit_channel.erl | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 8310bd8e..65905906 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1131,10 +1131,13 @@ handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> %% process_confirms to prevent each MsgSeqNo being removed from %% the set one by one which which would be inefficient State1 = State#ch{unconfirmed_qm = gb_trees:delete_any(QPid, UQM)}, - {Nack, SendFun} = case Reason of - normal -> {false, fun record_confirms/2}; - _ -> {true, fun send_nacks/2} - end, + {Nack, SendFun} = + if (Reason =:= noproc orelse Reason =:= nodedown orelse + Reason =:= normal orelse Reason =:= shutdown) -> + {false, fun record_confirms/2}; + true -> + {true, fun send_nacks/2} + end, {MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1), erase_queue_stats(QPid), State3 = SendFun(MXs, State2), -- cgit v1.2.1 From 133316568d7e38aeccfa2ca53438667286b6dc6b Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Mon, 18 Jul 2011 17:51:09 +0100 Subject: more the restart_app to a more appropriate place --- src/rabbit_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 56c7f2f5..3008c74c 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -60,7 +60,6 @@ all_tests() -> passed = test_confirms(), passed = maybe_run_cluster_dependent_tests(), passed = test_configurable_server_properties(), - ok = restart_app(), passed. maybe_run_cluster_dependent_tests() -> @@ -1673,6 +1672,7 @@ test_backing_queue() -> passed = test_queue_recover(), application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), + ok = restart_app(), %% reset rabbit_sup's restart order passed; _ -> passed -- cgit v1.2.1 From 0444295d7dfb4d54c58b30ded369e06b1eeb5eb0 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Mon, 18 Jul 2011 18:34:53 +0100 Subject: Bigger comment. 
--- src/rabbit_tests.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 3008c74c..2e454411 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1672,7 +1672,10 @@ test_backing_queue() -> passed = test_queue_recover(), application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), - ok = restart_app(), %% reset rabbit_sup's restart order + %% We will have restarted the message store, and thus changed + %% the order of the children of rabbit_sup. This will cause + %% problems if there are subsequent failures - see bug 24262. + ok = restart_app(), passed; _ -> passed -- cgit v1.2.1 From 19e1278a72feb7101ae5f1af08ef32d9a7e55ff2 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Mon, 18 Jul 2011 18:36:00 +0100 Subject: Speed improvements --- src/rabbit_backing_queue_qc.erl | 82 +++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index a870ddd5..1afe20ce 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -38,7 +38,7 @@ -record(state, {bqstate, len, %% int messages, %% queue of {msg_props, basic_msg} - acks, %% list of {acktag, {msg_props, basic_msg}} + acks, %% dict of acktag => {msg_props, basic_msg} confirms}). %% set of msgid %% Initialise model @@ -47,35 +47,36 @@ initial_state() -> #state{bqstate = qc_variable_queue_init(qc_test_queue()), len = 0, messages = queue:new(), - acks = [], + acks = orddict:new(), confirms = gb_sets:new()}. %% Property prop_backing_queue_test() -> ?FORALL(Cmds, commands(?MODULE, initial_state()), - begin - {ok, FileSizeLimit} = - application:get_env(rabbit, msg_store_file_size_limit), - application:set_env(rabbit, msg_store_file_size_limit, 512, - infinity), - {ok, MaxJournal} = - application:get_env(rabbit, queue_index_max_journal_entries), - application:set_env(rabbit, queue_index_max_journal_entries, 128, - infinity), - - {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds), - - application:set_env(rabbit, msg_store_file_size_limit, - FileSizeLimit, infinity), - application:set_env(rabbit, queue_index_max_journal_entries, - MaxJournal, infinity), - - rabbit_variable_queue:delete_and_terminate(shutdown, BQ), - ?WHENFAIL( - io:format("Result: ~p~n", [Res]), - aggregate(command_names(Cmds), Res =:= ok)) - end). + backing_queue_test(Cmds)). + +backing_queue_test(Cmds) -> + {ok, FileSizeLimit} = + application:get_env(rabbit, msg_store_file_size_limit), + application:set_env(rabbit, msg_store_file_size_limit, 512, + infinity), + {ok, MaxJournal} = + application:get_env(rabbit, queue_index_max_journal_entries), + application:set_env(rabbit, queue_index_max_journal_entries, 128, + infinity), + + {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds), + + application:set_env(rabbit, msg_store_file_size_limit, + FileSizeLimit, infinity), + application:set_env(rabbit, queue_index_max_journal_entries, + MaxJournal, infinity), + + rabbit_variable_queue:delete_and_terminate(shutdown, BQ), + ?WHENFAIL( + io:format("Result: ~p~n", [Res]), + aggregate(command_names(Cmds), Res =:= ok)). %% Commands @@ -121,11 +122,11 @@ qc_fetch(#state{bqstate = BQ}) -> {call, ?BQMOD, fetch, [boolean(), BQ]}. qc_ack(#state{bqstate = BQ, acks = Acks}) -> - {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}. + {call, ?BQMOD, ack, [rand_choice(orddict:fetch_keys(Acks)), BQ]}. 
qc_requeue(#state{bqstate = BQ, acks = Acks}) -> {call, ?BQMOD, requeue, - [rand_choice(proplists:get_keys(Acks)), fun(MsgOpts) -> MsgOpts end, BQ]}. + [rand_choice(orddict:fetch_keys(Acks)), fun(MsgOpts) -> MsgOpts end, BQ]}. qc_set_ram_duration_target(#state{bqstate = BQ}) -> {call, ?BQMOD, set_ram_duration_target, @@ -153,7 +154,7 @@ qc_purge(#state{bqstate = BQ}) -> precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg}) when Fun =:= ack; Fun =:= requeue -> - length(Acks) > 0; + orddict:size(Acks) > 0; precondition(#state{messages = Messages}, {call, ?BQMOD, publish_delivered, _Arg}) -> queue:is_empty(Messages); @@ -205,7 +206,7 @@ next_state(S, Res, _ -> Confirms end, acks = case AckReq of - true -> [{AckTag, {MsgProps, Msg}} | Acks]; + true -> orddict:append(AckTag, {MsgProps, Msg}, Acks); false -> Acks end }; @@ -222,7 +223,7 @@ next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> {{value, MsgProp_Msg}, M2} -> S2 = S1#state{len = Len - 1, messages = M2}, case AckReq of - true -> S2#state{acks = [{AckTag, MsgProp_Msg} | Acks]}; + true -> S2#state{acks = orddict:append(AckTag, MsgProp_Msg, Acks)}; false -> S2 end end; @@ -231,16 +232,20 @@ next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) -> #state{acks = AcksState} = S, BQ1 = {call, erlang, element, [2, Res]}, S#state{bqstate = BQ1, - acks = propvals_by_keys(AcksState, AcksArg)}; + acks = orddict:filter(fun (AckTag, _) -> + not lists:member(AckTag, AcksArg) + end, AcksState)}; next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> #state{len = Len, messages = Messages, acks = AcksState} = S, BQ1 = {call, erlang, element, [2, Res]}, - RequeueMsgs = [proplists:get_value(Key, AcksState) || Key <- AcksArg], + RequeueMsgs = lists:append([orddict:fetch(Key, AcksState) || Key <- AcksArg]), S#state{bqstate = BQ1, len = Len + length(RequeueMsgs), messages = queue:join(Messages, queue:from_list(RequeueMsgs)), - acks = propvals_by_keys(AcksState, AcksArg)}; + acks = orddict:filter(fun (AckTag, _) -> + not lists:member(AckTag, AcksArg) + end, AcksState)}; next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) -> S#state{bqstate = BQ}; @@ -276,7 +281,7 @@ postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) -> {{MsgFetched, _IsDelivered, AckTag, RemainingLen}, _BQ} -> {_MsgProps, Msg} = queue:head(Messages), MsgFetched =:= Msg andalso - not lists:member(AckTag, Acks) andalso + not orddict:is_key(AckTag, Acks) andalso not gb_sets:is_element(AckTag, Confrms) andalso RemainingLen =:= Len - 1; {empty, _BQ} -> @@ -285,7 +290,7 @@ postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) -> postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) -> #state{acks = Acks, confirms = Confrms} = S, - not lists:member(AckTag, Acks) andalso + not orddict:is_key(AckTag, Acks) andalso not gb_sets:is_element(AckTag, Confrms); postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) -> @@ -299,7 +304,9 @@ postcondition(#state{len = Len}, postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) -> #state{confirms = Confirms} = S, {ReportedConfirmed, _BQ} = Res, - lists:all(fun (M) -> lists:member(M, Confirms) end, ReportedConfirmed); + lists:all(fun (M) -> + gb_sets:is_element(M, Confirms) + end, ReportedConfirmed); postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) -> ?BQMOD:len(BQ) =:= Len. @@ -363,11 +370,6 @@ qc_test_queue(Durable) -> arguments = [], pid = self()}. 
-propvals_by_keys(Props, Keys) -> - lists:filter(fun ({Key, _Msg}) -> - not lists:member(Key, Keys) - end, Props). - rand_choice([]) -> []; rand_choice(List) -> [lists:nth(random:uniform(length(List)), List)]. -- cgit v1.2.1 From 31cdc2f23f169beff7ba25e64746a0c4f335dd57 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 19 Jul 2011 09:34:19 +0100 Subject: catch another two reasons --- src/rabbit_channel.erl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 65905906..f398fcc5 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -1132,10 +1132,13 @@ handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) -> %% the set one by one which which would be inefficient State1 = State#ch{unconfirmed_qm = gb_trees:delete_any(QPid, UQM)}, {Nack, SendFun} = - if (Reason =:= noproc orelse Reason =:= nodedown orelse - Reason =:= normal orelse Reason =:= shutdown) -> + case Reason of + Reason when Reason =:= noproc; Reason =:= noconnection; + Reason =:= normal; Reason =:= shutdown -> {false, fun record_confirms/2}; - true -> + {shutdown, _} -> + {false, fun record_confirms/2}; + _ -> {true, fun send_nacks/2} end, {MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1), -- cgit v1.2.1 From 4f9b1e18de0882229ffea44e2783177c60a666f2 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 19 Jul 2011 09:47:06 +0100 Subject: Cosmetic --- src/rabbit_backing_queue_qc.erl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index 1afe20ce..e372e351 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -223,8 +223,10 @@ next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) -> {{value, MsgProp_Msg}, M2} -> S2 = S1#state{len = Len - 1, messages = M2}, case AckReq of - true -> S2#state{acks = orddict:append(AckTag, MsgProp_Msg, Acks)}; - false -> S2 + true -> + S2#state{acks = orddict:append(AckTag, MsgProp_Msg, Acks)}; + false -> + S2 end end; @@ -239,7 +241,8 @@ next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) -> next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> #state{len = Len, messages = Messages, acks = AcksState} = S, BQ1 = {call, erlang, element, [2, Res]}, - RequeueMsgs = lists:append([orddict:fetch(Key, AcksState) || Key <- AcksArg]), + RequeueMsgs = lists:append([orddict:fetch(Key, AcksState) || + Key <- AcksArg]), S#state{bqstate = BQ1, len = Len + length(RequeueMsgs), messages = queue:join(Messages, queue:from_list(RequeueMsgs)), -- cgit v1.2.1 From fc007bd170ce1a6102ca3a57f8c24a61a04ab7ef Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 19 Jul 2011 14:55:13 +0100 Subject: really add schema-less nodes I've disabled disc-to-ram conversions, for now. Mnesia:change_table_copy_type(schema, node(), ram_copies) half-fails silently if mnesia isn't running. It removes the stuff in the db dir, but it gets recreated on the next mnesia:start/0. --- Makefile | 2 +- src/rabbit_mnesia.erl | 64 +++++++++++++++++++++++++++++++++------------------ src/rabbit_tests.erl | 15 ++++++------ 3 files changed, 51 insertions(+), 30 deletions(-) diff --git a/Makefile b/Makefile index d8ef058e..bc903147 100644 --- a/Makefile +++ b/Makefile @@ -163,7 +163,7 @@ run-node: all run-tests: all OUT=$$(echo "rabbit_tests:all_tests()." 
| $(ERL_CALL)) ; \ - echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null + echo -e $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index b38b43c4..cbbb00be 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -430,7 +430,7 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), - WasDiskNode = mnesia:system_info(use_dir), + WasDiskNode = filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> case Force of @@ -444,20 +444,18 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> end; true -> ok end, - if (WasDiskNode andalso (not IsDiskNode)) -> - rabbit_log:warning("converting from disc to ram node; backing up database"), - move_db(); - true -> ok - end, %% We create a new db (on disk, or in ram) in the first %% three cases and attempt to upgrade the in the other two case {Nodes, WasDiskNode, IsDiskNode} of - {_, _, false} -> - ok = create_tables(false), - ok = rabbit_version:record_desired(); + {_, true, false} -> + throw({error, {cannot_convert_disc_to_ram, + "Cannot convert a disc node to a ram node." + " Reset first."}}); + {[], _, false} -> + ok = create_schema(false); {[], false, true} -> %% Nothing there at all, start from scratch - ok = create_schema(); + ok = create_schema(true); {[], true, true} -> %% We're the first node up ok = case rabbit_upgrade:maybe_upgrade_local() of @@ -522,23 +520,29 @@ ensure_version_ok({error, _}) -> ok = rabbit_version:record_desired(). create_schema() -> + create_schema(true). + +create_schema(OnDisk) -> + Nodes = if OnDisk -> [node()]; + true -> [] + end, mnesia:stop(), - rabbit_misc:ensure_ok(mnesia:create_schema([node()]), + rabbit_misc:ensure_ok(mnesia:create_schema(Nodes), cannot_create_schema), rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = create_tables(), + if not OnDisk -> + mnesia:change_table_copy_type(schema, node(), ram_copies); + true -> ok + end, + ok = create_tables(OnDisk), ensure_schema_integrity(), ok = rabbit_version:record_desired(). move_db() -> mnesia:stop(), MnesiaDir = filename:dirname(dir() ++ "/"), - {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(), - BackupDir = lists:flatten( - io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w", - [MnesiaDir, - Year, Month, Day, Hour, Minute, Second])), + BackupDir = new_backup_dir_name(MnesiaDir), case file:rename(MnesiaDir, BackupDir) of ok -> %% NB: we cannot use rabbit_log here since it may not have @@ -553,6 +557,19 @@ move_db() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok. +new_backup_dir_name(MnesiaDir) -> + {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(), + BackupDir = lists:flatten( + io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w", + [MnesiaDir, + Year, Month, Day, Hour, Minute, Second])), + case filelib:is_file(BackupDir) of + false -> BackupDir; + true -> receive + after 1000 -> new_backup_dir_name(MnesiaDir) + end + end. + copy_db(Destination) -> ok = ensure_mnesia_not_running(), rabbit_misc:recursive_copy(dir(), Destination). @@ -560,16 +577,19 @@ copy_db(Destination) -> create_tables() -> create_tables(true). 
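%% Background (illustrative, not part of the patch): mnesia:create_schema/1
%% only ever creates an on-disc schema, so the ram-node branch of
%% create_schema/1 above starts mnesia on its default ram schema and merely
%% makes the local schema copy ram-resident while mnesia is running:
%%
%%   {atomic, ok} = mnesia:change_table_copy_type(schema, node(), ram_copies)
%%
%% which is the call the commit message warns about: it misbehaves silently
%% if mnesia is not actually running.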
-create_tables(IsDiskNode) -> +create_tables(OnDisk) -> lists:foreach(fun ({Tab, TabDef}) -> TabDef1 = proplists:delete(match, TabDef), - TabDef2 = case IsDiskNode of + TabDef2 = case OnDisk of true -> TabDef1; false -> - [{disc_copies, []} | - proplists:delete(disc_copies, - TabDef1)] + [{disc_copies, []}, + {ram_copies, [node()]} | + proplists:delete( + ram_copies, + proplists:delete(disc_copies, + TabDef1))] end, case mnesia:create_table(Tab, TabDef2) of {atomic, ok} -> ok; diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 51185879..b2d41c64 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -951,8 +951,9 @@ test_cluster_management() -> ok = control_action(reset, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), - ok = control_action(force_cluster, ["invalid1@invalid", - "invalid2@invalid"]), + {error, {cannot_convert_disc_to_ram, _}} = + control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% join a non-existing cluster as a ram node ok = control_action(reset, []), @@ -992,12 +993,14 @@ test_cluster_management2(SecondaryNode) -> ok = control_action(stop_app, []), %% join non-existing cluster as a ram node + ok = control_action(reset, []), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), ok = control_action(start_app, []), ok = control_action(stop_app, []), %% join empty cluster as a ram node + ok = control_action(reset, []), ok = control_action(cluster, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), @@ -1009,13 +1012,11 @@ test_cluster_management2(SecondaryNode) -> ok = control_action(stop_app, []), %% convert a disk node into a ram node - ok = control_action(force_cluster, ["invalid1@invalid", - "invalid2@invalid"]), + {error, {cannot_convert_disc_to_ram, _}} = + control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% turn a disk node into a ram node - receive - after 1000 -> ok - end, ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), ok = control_action(start_app, []), -- cgit v1.2.1 From 0fe414015c8640010a7b4e3d2dad7e2a4304a534 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 19 Jul 2011 14:56:20 +0100 Subject: comment --- src/rabbit_mnesia.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index cbbb00be..ff6b0b11 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -451,7 +451,8 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> throw({error, {cannot_convert_disc_to_ram, "Cannot convert a disc node to a ram node." " Reset first."}}); - {[], _, false} -> + {[], false, false} -> + %% New ram node; start from scratch ok = create_schema(false); {[], false, true} -> %% Nothing there at all, start from scratch -- cgit v1.2.1 From 171efe1aca1e1c1aa1de9e9f26409b65bf75c94b Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 11:07:51 +0100 Subject: enable disc-less nodes The main trouble with this is mnesia:change_config(extra_db_nodes,...). If the current node doesn't have a schema with all the tables created, it will get it from one of the nodes it connects to. This is a problem if we're a disc node and they're a ram node or vice versa. So, after we get the tables from the other node and wait_for_remote_tables, we assert that their storage location is correct and change it if not. 
If the other node is disc and we are ram, this unfortunately means that the tables are first written to disc and then converted to ram, but since this is a one-off operation during startup, it shouldn't be a problem. --- src/rabbit_mnesia.erl | 83 ++++++++++++++++++++++++++++++++++---------------- src/rabbit_tests.erl | 10 +++--- src/rabbit_upgrade.erl | 10 ++---- 3 files changed, 62 insertions(+), 41 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index ff6b0b11..1a6825ee 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -23,7 +23,8 @@ empty_ram_only_tables/0, copy_db/1, wait_for_tables/1, create_cluster_nodes_config/1, read_cluster_nodes_config/0, record_running_nodes/0, read_previously_running_nodes/0, - delete_previously_running_nodes/0, running_nodes_filename/0]). + delete_previously_running_nodes/0, running_nodes_filename/0, + is_disc_node/0]). -export([table_names/0]). @@ -65,6 +66,7 @@ -spec(read_previously_running_nodes/0 :: () -> [node()]). -spec(delete_previously_running_nodes/0 :: () -> 'ok'). -spec(running_nodes_filename/0 :: () -> file:filename()). +-spec(is_disc_node/0 :: () -> boolean()). -endif. @@ -429,8 +431,8 @@ delete_previously_running_nodes() -> init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], - IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), - WasDiskNode = filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")), + IsDiskNode = lists:member(node(), ClusterNodes), + WasDiskNode = is_disc_node(), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> case Force of @@ -448,9 +450,12 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> %% three cases and attempt to upgrade the in the other two case {Nodes, WasDiskNode, IsDiskNode} of {_, true, false} -> - throw({error, {cannot_convert_disc_to_ram, - "Cannot convert a disc node to a ram node." - " Reset first."}}); + %% Converting disc node to ram + mnesia:stop(), + move_db(), + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), + ok = create_schema(false); {[], false, false} -> %% New ram node; start from scratch ok = create_schema(false); @@ -469,12 +474,13 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> %% Subsequent node in cluster, catch up ensure_version_ok( rpc:call(AnotherNode, rabbit_version, recorded, [])), - ok = wait_for_replicated_tables(), {CopyType, CopyTypeAlt} = case IsDiskNode of true -> {disc, disc_copies}; false -> {ram, ram_copies} end, + ok = wait_for_replicated_tables(), + assert_tables_copy_type(CopyTypeAlt), ok = create_local_table_copy(schema, CopyTypeAlt), ok = create_local_table_copies(CopyType), ok = SecondaryPostMnesiaFun(), @@ -524,22 +530,25 @@ create_schema() -> create_schema(true). create_schema(OnDisk) -> - Nodes = if OnDisk -> [node()]; - true -> [] - end, - mnesia:stop(), - rabbit_misc:ensure_ok(mnesia:create_schema(Nodes), - cannot_create_schema), - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), - if not OnDisk -> - mnesia:change_table_copy_type(schema, node(), ram_copies); - true -> ok + if OnDisk -> + mnesia:stop(), + rabbit_misc:ensure_ok(mnesia:create_schema([node()]), + cannot_create_schema), + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia); + true -> + ok end, ok = create_tables(OnDisk), ensure_schema_integrity(), ok = rabbit_version:record_desired(). 
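%% Illustrative detail for "we assert that their storage location is correct
%% and change it if not" (commit message above): the per-table conversion on
%% this node goes through mnesia:change_table_copy_type/3, e.g.
%%
%%   {atomic, ok} = mnesia:change_table_copy_type(rabbit_durable_exchange,
%%                                                node(), ram_copies)
%%
%% and the same call with the atom 'schema' switches the node itself between
%% ram and disc. The assertion added further below first reads
%% mnesia:table_info(Tab, storage_type) and only converts tables whose
%% current type differs from the desired one.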
+is_disc_node() -> + %% This is pretty ugly but we can't start Mnesia and ask it (will hang), + %% we can't look at the config file (may not include us even if we're a + %% disc node). + filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). + move_db() -> mnesia:stop(), MnesiaDir = filename:dirname(dir() ++ "/"), @@ -582,15 +591,8 @@ create_tables(OnDisk) -> lists:foreach(fun ({Tab, TabDef}) -> TabDef1 = proplists:delete(match, TabDef), TabDef2 = case OnDisk of - true -> - TabDef1; - false -> - [{disc_copies, []}, - {ram_copies, [node()]} | - proplists:delete( - ram_copies, - proplists:delete(disc_copies, - TabDef1))] + true -> TabDef1; + false -> copy_type_to_ram(TabDef1) end, case mnesia:create_table(Tab, TabDef2) of {atomic, ok} -> ok; @@ -603,9 +605,36 @@ create_tables(OnDisk) -> table_definitions()), ok. +copy_type_to_ram(TabDef) -> + [{disc_copies, []}, {ram_copies, [node()]} + | proplists:delete(ram_copies, proplists:delete(disc_copies, TabDef))]. + table_has_copy_type(TabDef, DiscType) -> lists:member(node(), proplists:get_value(DiscType, TabDef, [])). +assert_tables_copy_type(CopyTypeAlt) -> + lists:foreach( + fun({Tab, TabDef}) -> + HasDiscCopies = table_has_copy_type(TabDef, disc_copies), + HasDiscOnlyCopies = table_has_copy_type(TabDef, disc_only_copies), + StorageType = if HasDiscCopies -> disc_copies; + HasDiscOnlyCopies -> disc_only_copies; + true -> ram_copies + end, + StorageType1 = if CopyTypeAlt =:= disc_copies -> StorageType; + true -> ram_copies + end, + case mnesia:table_info(Tab, storage_type) of + StorageType1 -> ok; + unknown -> ok; + _ -> io:format("~p to ~p: ~p~n", [Tab, StorageType1, mnesia:change_table_copy_type(Tab, node(), StorageType1)]) + end + end, table_definitions()), + case mnesia:table_info(schema, storage_type) of + CopyTypeAlt -> ok; + _ -> io:format("~p to ~p: ~p~n", [schema, CopyTypeAlt, mnesia:change_table_copy_type(schema, node(), CopyTypeAlt)]) + end. + create_local_table_copies(Type) -> lists:foreach( fun ({Tab, TabDef}) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index b2d41c64..766cad83 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -951,9 +951,8 @@ test_cluster_management() -> ok = control_action(reset, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), - {error, {cannot_convert_disc_to_ram, _}} = - control_action(force_cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% join a non-existing cluster as a ram node ok = control_action(reset, []), @@ -1012,9 +1011,8 @@ test_cluster_management2(SecondaryNode) -> ok = control_action(stop_app, []), %% convert a disk node into a ram node - {error, {cannot_convert_disc_to_ram, _}} = - control_action(force_cluster, ["invalid1@invalid", - "invalid2@invalid"]), + ok = control_action(force_cluster, ["invalid1@invalid", + "invalid2@invalid"]), %% turn a disk node into a ram node ok = control_action(reset, []), diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index a2abb1e5..e6b52c61 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -144,7 +144,7 @@ upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previously_running_nodes(), - case {is_disc_node(), AfterUs} of + case {rabbit_mnesia:is_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -182,12 +182,6 @@ upgrade_mode(AllNodes) -> end end. 
-is_disc_node() -> - %% This is pretty ugly but we can't start Mnesia and ask it (will hang), - %% we can't look at the config file (may not include us even if we're a - %% disc node). - filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). - die(Msg, Args) -> %% We don't throw or exit here since that gets thrown %% straight out into do_boot, generating an erl_crash.dump @@ -218,7 +212,7 @@ force_tables() -> secondary_upgrade(AllNodes) -> %% must do this before we wipe out schema - IsDiscNode = is_disc_node(), + IsDiscNode = rabbit_mnesia:is_disc_node(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), %% Note that we cluster with all nodes, rather than all disc nodes -- cgit v1.2.1 From 6ece8b2fccb9abee4a478b5b5116f79ec70bf3e1 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 11:17:40 +0100 Subject: check that a ram node is indeed a ram node --- src/rabbit_tests.erl | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 766cad83..adb3e3ed 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -953,11 +953,13 @@ test_cluster_management() -> ok = control_action(stop_app, []), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), + ok = assert_ram_node(), %% join a non-existing cluster as a ram node ok = control_action(reset, []), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), + ok = assert_ram_node(), SecondaryNode = rabbit_misc:makenode("hare"), case net_adm:ping(SecondaryNode) of @@ -979,12 +981,14 @@ test_cluster_management2(SecondaryNode) -> %% make a ram node ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), + ok = assert_ram_node(), %% join cluster as a ram node ok = control_action(reset, []), ok = control_action(force_cluster, [SecondaryNodeS, "invalid1@invalid"]), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_ram_node(), %% change cluster config while remaining in same cluster ok = control_action(force_cluster, ["invalid2@invalid", SecondaryNodeS]), @@ -997,12 +1001,14 @@ test_cluster_management2(SecondaryNode) -> "invalid2@invalid"]), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_ram_node(), %% join empty cluster as a ram node ok = control_action(reset, []), ok = control_action(cluster, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_ram_node(), %% turn ram node into disk node ok = control_action(reset, []), @@ -1013,12 +1019,14 @@ test_cluster_management2(SecondaryNode) -> %% convert a disk node into a ram node ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), + ok = assert_ram_node(), %% turn a disk node into a ram node ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_ram_node(), %% NB: this will log an inconsistent_database error, which is harmless %% Turning cover on / off is OK even if we're not in general using cover, @@ -1582,6 +1590,12 @@ clean_logs(Files, Suffix) -> end || File <- Files], ok. +assert_ram_node() -> + case rabbit_mnesia:is_disc_node() of + true -> exit('not_ram_node'); + false -> ok + end. 
+ delete_file(File) -> case file:delete(File) of ok -> ok; -- cgit v1.2.1 From dd13e395fd5623f057f552b3c17dc9a811b731a4 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 12:42:37 +0100 Subject: test that disc nodes are actually disc nodes The tests used to be slightly wrong. They'd expect: rabbitmqctl cluster to create a cluster with a single ram node. It didn't, instead, it created a cluster with a single disc node. Since we weren't actually checking the resulting node type, everything went along fine. Assert_tables_copy_type is complicated by all the cases it has to handle, namely converting disc -> ram and ram -> disc. For disc -> ram, we first need to convert all the tables to ram, then, we need to convert the schema (converting it before fails with "Disc resident tables"). For ram -> disc, we first need to convert the schema, otherwise, all the table conversions will fail with 'has_no_disc'. Regarding an earlier commit, using mensia:system_info(use_dir) to check if we have a disc node is wrong because we create the directory for the message_store anyway. --- src/rabbit_mnesia.erl | 18 +++++++++++++++--- src/rabbit_tests.erl | 26 ++++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 1a6825ee..805b4152 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -431,7 +431,7 @@ delete_previously_running_nodes() -> init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], - IsDiskNode = lists:member(node(), ClusterNodes), + IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), WasDiskNode = is_disc_node(), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> @@ -613,6 +613,14 @@ table_has_copy_type(TabDef, DiscType) -> lists:member(node(), proplists:get_value(DiscType, TabDef, [])). assert_tables_copy_type(CopyTypeAlt) -> + case mnesia:table_info(schema, storage_type) of + CopyTypeAlt -> ok; + _ -> case mnesia:change_table_copy_type(schema, node(), CopyTypeAlt) of + {aborted, {"Disc resident tables", _, _}} -> ok; + {atomic, ok} -> ok; + E -> exit({'node_conversion_failed', E}) + end + end, lists:foreach( fun({Tab, TabDef}) -> HasDiscCopies = table_has_copy_type(TabDef, disc_copies), @@ -627,12 +635,16 @@ assert_tables_copy_type(CopyTypeAlt) -> case mnesia:table_info(Tab, storage_type) of StorageType1 -> ok; unknown -> ok; - _ -> io:format("~p to ~p: ~p~n", [Tab, StorageType1, mnesia:change_table_copy_type(Tab, node(), StorageType1)]) + _ -> + {atomic, ok} = mnesia:change_table_copy_type( + Tab, node(), StorageType1) end end, table_definitions()), case mnesia:table_info(schema, storage_type) of CopyTypeAlt -> ok; - _ -> io:format("~p to ~p: ~p~n", [schema, CopyTypeAlt, mnesia:change_table_copy_type(schema, node(), CopyTypeAlt)]) + _ -> + {atomic, ok} = mnesia:change_table_copy_type( + schema, node(), CopyTypeAlt) end. 
create_local_table_copies(Type) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index adb3e3ed..700e90bd 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -951,6 +951,7 @@ test_cluster_management() -> ok = control_action(reset, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_disc_node(), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), ok = assert_ram_node(), @@ -978,6 +979,7 @@ test_cluster_management2(SecondaryNode) -> %% make a disk node ok = control_action(reset, []), ok = control_action(cluster, [NodeS]), + ok = assert_disc_node(), %% make a ram node ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), @@ -1003,24 +1005,38 @@ test_cluster_management2(SecondaryNode) -> ok = control_action(stop_app, []), ok = assert_ram_node(), - %% join empty cluster as a ram node + %% join empty cluster as a ram node (converts to disc) ok = control_action(reset, []), ok = control_action(cluster, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_disc_node(), + + %% make a new ram node + ok = control_action(reset, []), + ok = control_action(force_cluster, [SecondaryNodeS]), + ok = control_action(start_app, []), + ok = control_action(stop_app, []), ok = assert_ram_node(), %% turn ram node into disk node - ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS, NodeS]), ok = control_action(start_app, []), ok = control_action(stop_app, []), + ok = assert_disc_node(), %% convert a disk node into a ram node + ok = assert_disc_node(), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), ok = assert_ram_node(), + %% make a new disk node + ok = control_action(force_reset, []), + ok = control_action(start_app, []), + ok = control_action(stop_app, []), + ok = assert_disc_node(), + %% turn a disk node into a ram node ok = control_action(reset, []), ok = control_action(cluster, [SecondaryNodeS]), @@ -1596,6 +1612,12 @@ assert_ram_node() -> false -> ok end. +assert_disc_node() -> + case rabbit_mnesia:is_disc_node() of + true -> ok; + false -> exit('not_disc_node') + end. + delete_file(File) -> case file:delete(File) of ok -> ok; -- cgit v1.2.1 From 54bc319414f0f04f7961abcbff8a72c40c063286 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 12:53:49 +0100 Subject: remove two resets introduced earlier We don't need them anymore. BTW, when generating the backup db directory name, we used to just append the date (including minutes, seconds) to the current directory name. But since we do an extra move_db/0 now (when converting from disc to ram), some of the tests were fast enough to try to backup the database twice in the same second. Needless to say, this would cause an error. So, now we generate the new name as before, but if it's already used, we wait 1s and try again. 
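Concretely, the naming scheme appends a UTC timestamp to the mnesia directory
name; the path and timestamp below are only an example of what the helper
produces:

    MnesiaDir = "/var/lib/rabbitmq/mnesia/hare",               %% example path
    {{Y, Mo, D}, {H, Mi, S}} = {{2011, 7, 20}, {14, 56, 13}},  %% example time
    "/var/lib/rabbitmq/mnesia/hare_20110720145613" =
        lists:flatten(io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
                                    [MnesiaDir, Y, Mo, D, H, Mi, S])).

If that directory already exists (i.e. two conversions within the same
second), the helper sleeps for a second and recomputes the name.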
--- src/rabbit_tests.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 46c33fc1..af201120 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -998,7 +998,6 @@ test_cluster_management2(SecondaryNode) -> ok = control_action(stop_app, []), %% join non-existing cluster as a ram node - ok = control_action(reset, []), ok = control_action(force_cluster, ["invalid1@invalid", "invalid2@invalid"]), ok = control_action(start_app, []), @@ -1006,7 +1005,6 @@ test_cluster_management2(SecondaryNode) -> ok = assert_ram_node(), %% join empty cluster as a ram node (converts to disc) - ok = control_action(reset, []), ok = control_action(cluster, []), ok = control_action(start_app, []), ok = control_action(stop_app, []), -- cgit v1.2.1 From c2bc79c3aa64213305e79d61895127d3d0fff507 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 15:28:01 +0100 Subject: reset before changing node type Matthias points out that the only way for mnesia:change_config to start creating tables is for the node to re-join the cluster (and get its old table definitions from other nodes). Instead of fixing the tables after that, we now reset inside the cluster command. Note that in very old rabbit versions, we'd just prevent this by having the user reset manually. --- src/rabbit_mnesia.erl | 57 ++++++++++++++++++--------------------------------- src/rabbit_tests.erl | 4 ++++ 2 files changed, 24 insertions(+), 37 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 805b4152..6b901eae 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -117,6 +117,22 @@ force_cluster(ClusterNodes) -> cluster(ClusterNodes, Force) -> ensure_mnesia_not_running(), ensure_mnesia_dir(), + + %% Reset the node if we're in a cluster and have just changed node type + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + AllClusteredNodes = + lists:usort(all_clustered_nodes() ++ + read_cluster_nodes_config()) -- [node()], + mnesia:stop(), + case {AllClusteredNodes =/= [], + is_disc_node() =/= should_be_disc_node(ClusterNodes)} of + {true, true} -> error_logger:warning_msg("changing node type; " + "resetting...~n"), + reset(); + {_, _} -> ok + end, + + %% Join the cluster rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), try ok = init_db(ClusterNodes, Force, @@ -431,7 +447,7 @@ delete_previously_running_nodes() -> init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], - IsDiskNode = ClusterNodes == [] orelse lists:member(node(), ClusterNodes), + IsDiskNode = should_be_disc_node(ClusterNodes), WasDiskNode = is_disc_node(), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> @@ -480,7 +496,6 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> false -> {ram, ram_copies} end, ok = wait_for_replicated_tables(), - assert_tables_copy_type(CopyTypeAlt), ok = create_local_table_copy(schema, CopyTypeAlt), ok = create_local_table_copies(CopyType), ok = SecondaryPostMnesiaFun(), @@ -549,6 +564,9 @@ is_disc_node() -> %% disc node). filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). +should_be_disc_node(ClusterNodes) -> + ClusterNodes == [] orelse lists:member(node(), ClusterNodes). 
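%% Hedged examples of the predicate above, assuming node() =:= rabbit@megaera
%% (the host name is made up):
%%
%%   should_be_disc_node([])                             -> true   %% standalone
%%   should_be_disc_node([rabbit@megaera, hare@megaera]) -> true   %% listed
%%   should_be_disc_node([hare@megaera, bunny@megaera])  -> false  %% ram node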
+ move_db() -> mnesia:stop(), MnesiaDir = filename:dirname(dir() ++ "/"), @@ -612,41 +630,6 @@ copy_type_to_ram(TabDef) -> table_has_copy_type(TabDef, DiscType) -> lists:member(node(), proplists:get_value(DiscType, TabDef, [])). -assert_tables_copy_type(CopyTypeAlt) -> - case mnesia:table_info(schema, storage_type) of - CopyTypeAlt -> ok; - _ -> case mnesia:change_table_copy_type(schema, node(), CopyTypeAlt) of - {aborted, {"Disc resident tables", _, _}} -> ok; - {atomic, ok} -> ok; - E -> exit({'node_conversion_failed', E}) - end - end, - lists:foreach( - fun({Tab, TabDef}) -> - HasDiscCopies = table_has_copy_type(TabDef, disc_copies), - HasDiscOnlyCopies = table_has_copy_type(TabDef, disc_only_copies), - StorageType = if HasDiscCopies -> disc_copies; - HasDiscOnlyCopies -> disc_only_copies; - true -> ram_copies - end, - StorageType1 = if CopyTypeAlt =:= disc_copies -> StorageType; - true -> ram_copies - end, - case mnesia:table_info(Tab, storage_type) of - StorageType1 -> ok; - unknown -> ok; - _ -> - {atomic, ok} = mnesia:change_table_copy_type( - Tab, node(), StorageType1) - end - end, table_definitions()), - case mnesia:table_info(schema, storage_type) of - CopyTypeAlt -> ok; - _ -> - {atomic, ok} = mnesia:change_table_copy_type( - schema, node(), CopyTypeAlt) - end. - create_local_table_copies(Type) -> lists:foreach( fun ({Tab, TabDef}) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index af201120..0c1a04b2 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1066,6 +1066,10 @@ test_cluster_management2(SecondaryNode) -> {error, {no_running_cluster_nodes, _, _}} = control_action(reset, []), + %% attempt to change type when no other node is alive + {error, {no_running_cluster_nodes, _, _}} = + control_action(cluster, [SecondaryNodeS]), + %% leave system clustered, with the secondary node as a ram node ok = control_action(force_reset, []), ok = control_action(start_app, []), -- cgit v1.2.1 From 287f643f8c443f4d019f5a14a09d1dbd94baa4e7 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 16:53:17 +0100 Subject: always reset and preemptively leave clusters Always backup and reset during clustering if the node type has changed. Worst case, it ensures that we actually have an empty mnesia dir with new ram nodes. Also, preemptively leave a cluster before joining it. Suppose we had a two-node cluster, the first node goes down, the second hard resets, the first node comes back up, the second node tries to rejoin the cluster with a different type. Since it hard-reset, it doesn't know that it used to be part of the cluster, and the other node is unaware that our node is supposed to have left the cluster. So, when clustering, we always try to leave a cluster before joining it. In leave_cluster/2, I added {aborted, {node_not_running, _}} to the "not error" returns, because it looks similar to {badrpc, nodedown}, which was already there. This may be wrong. 
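To make the leave_cluster/2 remark concrete, the classification of the remote
del_table_copy result now looks roughly like this (a condensed sketch of the
clause this patch touches; Nodes and RunningNodes are the arguments of
leave_cluster/2):

    case rpc:call(Node, mnesia, del_table_copy, [schema, node()]) of
        {atomic, ok}                     -> true;  %% we left via this node
        {badrpc, nodedown}               -> false; %% try the next node
        {aborted, {node_not_running, _}} -> false; %% newly treated the same way
        {aborted, Reason}                -> throw({error,
                                                   {failed_to_leave_cluster,
                                                    Nodes, RunningNodes,
                                                    Reason}})
    end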
--- src/rabbit_mnesia.erl | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 6b901eae..56bd2bfa 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -118,18 +118,27 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_not_running(), ensure_mnesia_dir(), - %% Reset the node if we're in a cluster and have just changed node type - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - AllClusteredNodes = - lists:usort(all_clustered_nodes() ++ - read_cluster_nodes_config()) -- [node()], - mnesia:stop(), - case {AllClusteredNodes =/= [], - is_disc_node() =/= should_be_disc_node(ClusterNodes)} of - {true, true} -> error_logger:warning_msg("changing node type; " - "resetting...~n"), - reset(); - {_, _} -> ok + %% Reset the node if we've just changed node type + case {is_disc_node(), should_be_disc_node(ClusterNodes)} of + {true, false} -> error_logger:warning_msg( + "changing node type; backing up db and " + "resetting...~n"), + ok = move_db(), + mnesia:stop(), + reset(); + _ -> ok + end, + + %% Pre-emtively leave the cluster (in case we had been part of it + %% and force_reseted) + ProperClusterNodes = ClusterNodes -- [node()], + try leave_cluster(ProperClusterNodes, ProperClusterNodes) of + ok -> ok + catch + throw:({error, {no_running_cluster_nodes, _, _}} = E) -> + if Force -> ok; + true -> throw(E) + end end, %% Join the cluster @@ -496,6 +505,7 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> false -> {ram, ram_copies} end, ok = wait_for_replicated_tables(), + ok = create_local_table_copy(schema, CopyTypeAlt), ok = create_local_table_copies(CopyType), ok = SecondaryPostMnesiaFun(), @@ -722,6 +732,7 @@ leave_cluster(Nodes, RunningNodes) -> [schema, node()]) of {atomic, ok} -> true; {badrpc, nodedown} -> false; + {aborted, {node_not_running, _}} -> false; {aborted, Reason} -> throw({error, {failed_to_leave_cluster, Nodes, RunningNodes, Reason}}) -- cgit v1.2.1 From dd720bd4e5323917d3d1c487de459418c37026a4 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 17:46:05 +0100 Subject: handle the starting/stopping returns from mnesia --- src/rabbit_mnesia.erl | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 56bd2bfa..3f5bc134 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -85,7 +85,9 @@ status() -> no -> case all_clustered_nodes() of [] -> []; Nodes -> [{unknown, Nodes}] - end + end; + Reason when Reason =:= starting; Reason =:= stopping -> + exit({rabbit_busy, try_again_later}) end}, {running_nodes, running_clustered_nodes()}]. @@ -316,13 +318,23 @@ ensure_mnesia_dir() -> ensure_mnesia_running() -> case mnesia:system_info(is_running) of yes -> ok; - no -> throw({error, mnesia_not_running}) + no -> throw({error, mnesia_not_running}); + Reason when Reason =:= starting; Reason =:= stopping -> + wait_and_try_again(ensure_mnesia_running, []) end. ensure_mnesia_not_running() -> case mnesia:system_info(is_running) of no -> ok; - yes -> throw({error, mnesia_unexpectedly_running}) + yes -> throw({error, mnesia_unexpectedly_running}); + Reason when Reason =:= starting; Reason =:= stopping -> + wait_and_try_again(ensure_mnesia_not_running, []) + end. + +wait_and_try_again(Fun, Args) -> + receive + after 1000 -> error_logger:info_msg("trying to ~p again~n", [Fun, Args]), + apply(Fun, Args) end. 
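%% For context: mnesia:system_info(is_running) can report the transient values
%% starting and stopping in addition to yes and no, which is exactly what this
%% patch makes ensure_mnesia_running/0 and ensure_mnesia_not_running/0 retry
%% (via wait_and_try_again/2 above) instead of failing immediately.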
ensure_schema_integrity() -> @@ -603,9 +615,7 @@ new_backup_dir_name(MnesiaDir) -> Year, Month, Day, Hour, Minute, Second])), case filelib:is_file(BackupDir) of false -> BackupDir; - true -> receive - after 1000 -> new_backup_dir_name(MnesiaDir) - end + true -> wait_and_try_again(new_backup_dir_name, [MnesiaDir]) end. copy_db(Destination) -> @@ -743,3 +753,4 @@ leave_cluster(Nodes, RunningNodes) -> false -> throw({error, {no_running_cluster_nodes, Nodes, RunningNodes}}) end. + -- cgit v1.2.1 From a5280c3a64dad7e7458bfd0b3a0f262cbf6ddaee Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 20 Jul 2011 18:06:02 +0100 Subject: refactor --- src/rabbit_mnesia.erl | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 3f5bc134..d642d17a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -318,24 +318,24 @@ ensure_mnesia_dir() -> ensure_mnesia_running() -> case mnesia:system_info(is_running) of yes -> ok; - no -> throw({error, mnesia_not_running}); - Reason when Reason =:= starting; Reason =:= stopping -> - wait_and_try_again(ensure_mnesia_running, []) + starting -> waiting_for(mnesia_running), + ensure_mnesia_running(); + Reason when Reason =:= no; Reason =:= stopping -> + throw({error, mnesia_not_running}) end. ensure_mnesia_not_running() -> case mnesia:system_info(is_running) of no -> ok; - yes -> throw({error, mnesia_unexpectedly_running}); - Reason when Reason =:= starting; Reason =:= stopping -> - wait_and_try_again(ensure_mnesia_not_running, []) + stopping -> waiting_for(mnesia_not_running), + ensure_mnesia_not_running(); + Reason when Reason =:= yes; Reason =:= starting -> + throw({error, mnesia_unexpectedly_running}) end. -wait_and_try_again(Fun, Args) -> - receive - after 1000 -> error_logger:info_msg("trying to ~p again~n", [Fun, Args]), - apply(Fun, Args) - end. +waiting_for(Condition) -> + error_logger:info_msg("Waiting for ~p...~n", [Condition]), + timer:sleep(1000). ensure_schema_integrity() -> case check_schema_integrity() of @@ -615,7 +615,8 @@ new_backup_dir_name(MnesiaDir) -> Year, Month, Day, Hour, Minute, Second])), case filelib:is_file(BackupDir) of false -> BackupDir; - true -> wait_and_try_again(new_backup_dir_name, [MnesiaDir]) + true -> waiting_for(new_backup_dir_name), + new_backup_dir_name(MnesiaDir) end. copy_db(Destination) -> -- cgit v1.2.1 From 9a8c03eb7dc38db3c9a1dd63ae029d21ac6d7118 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 13:20:07 +0100 Subject: refactor and re-sync ram nodes A bit of trouble performing local upgrades on ram nodes: the upgrade process takes down Mnesia, which causes it to lose its schema and its recorded table info, which causes the ensuing schema check to fail. Solution: after doing the secondary upgrades, re-cluster ram nodes and wait for them to sync up, again. --- src/rabbit_mnesia.erl | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index d642d17a..bc749325 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -193,6 +193,16 @@ nodes_of_type(Type) -> mnesia:table_info(rabbit_durable_exchange, Type). table_definitions() -> + table_definitions(is_disc_node()). + +%% The tables aren't supposed to be on disk on a ram node +table_definitions(true) -> + real_table_definitions(); +table_definitions(false) -> + [{Tab, copy_type_to_ram(TabDef)} + || {Tab, TabDef} <- real_table_definitions()]. 
+ +real_table_definitions() -> [{rabbit_user, [{record_name, internal_user}, {attributes, record_info(fields, internal_user)}, @@ -297,10 +307,10 @@ resource_match(Kind) -> #resource{kind = Kind, _='_'}. table_names() -> - [Tab || {Tab, _} <- table_definitions()]. + [Tab || {Tab, _} <- real_table_definitions()]. replicated_table_names() -> - [Tab || {Tab, TabDef} <- table_definitions(), + [Tab || {Tab, TabDef} <- real_table_definitions(), not lists:member({local_content, true}, TabDef) ]. @@ -517,10 +527,21 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> false -> {ram, ram_copies} end, ok = wait_for_replicated_tables(), - ok = create_local_table_copy(schema, CopyTypeAlt), ok = create_local_table_copies(CopyType), + ok = SecondaryPostMnesiaFun(), + %% We've taken down mnesia, so ram nodes will need + %% to re-sync + case is_disc_node() of + false -> mnesia:start(), + ensure_mnesia_running(), + mnesia:change_config(extra_db_nodes, + ProperClusterNodes), + wait_for_replicated_tables(); + true -> ok + end, + ensure_schema_integrity(), ok end; @@ -629,19 +650,15 @@ create_tables() -> create_tables(OnDisk) -> lists:foreach(fun ({Tab, TabDef}) -> TabDef1 = proplists:delete(match, TabDef), - TabDef2 = case OnDisk of - true -> TabDef1; - false -> copy_type_to_ram(TabDef1) - end, - case mnesia:create_table(Tab, TabDef2) of + case mnesia:create_table(Tab, TabDef1) of {atomic, ok} -> ok; {aborted, {already_exists, Tab}} -> ok; {aborted, Reason} -> throw({error, {table_creation_failed, - Tab, TabDef2, Reason}}) + Tab, TabDef1, Reason}}) end end, - table_definitions()), + table_definitions(OnDisk)), ok. copy_type_to_ram(TabDef) -> @@ -677,7 +694,7 @@ create_local_table_copies(Type) -> end, ok = create_local_table_copy(Tab, StorageType) end, - table_definitions()), + table_definitions(Type =:= disc)), ok. 
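%% Effect of the change just above (illustrative): a ram node now derives its
%% local copies from ram-flavoured definitions, i.e. for every table
%% copy_type_to_ram/1 turns, say,
%%
%%   [{disc_copies, [node()]}, {attributes, ...}]
%% into
%%   [{disc_copies, []}, {ram_copies, [node()]}, {attributes, ...}]
%%
%% so no disc-resident copy is ever requested on this node.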
create_local_table_copy(Tab, Type) -> -- cgit v1.2.1 From 4e6e379350955f16cfbe4bd21eb015c12c5b5bf0 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 13:54:03 +0100 Subject: oops --- src/rabbit_mnesia.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index bc749325..8c908ee5 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -535,10 +535,10 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> %% to re-sync case is_disc_node() of false -> mnesia:start(), - ensure_mnesia_running(), - mnesia:change_config(extra_db_nodes, - ProperClusterNodes), - wait_for_replicated_tables(); + ensure_mnesia_running(), + mnesia:change_config(extra_db_nodes, + ProperClusterNodes), + wait_for_replicated_tables(); true -> ok end, @@ -562,7 +562,7 @@ maybe_upgrade_local_or_record_desired() -> schema_ok_or_move() -> case check_schema_integrity() of - {true, ok} -> + ok -> ok; {error, Reason} -> %% NB: we cannot use rabbit_log here since it may not have been -- cgit v1.2.1 From 78a89d4d7e56ec4fb5e3a53f8f08688a7009016b Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 14:16:18 +0100 Subject: remove redundant code --- src/rabbit_mnesia.erl | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 8c908ee5..2f1e150a 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -494,16 +494,9 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> true -> ok end, %% We create a new db (on disk, or in ram) in the first - %% three cases and attempt to upgrade the in the other two + %% two cases and attempt to upgrade the in the other two case {Nodes, WasDiskNode, IsDiskNode} of - {_, true, false} -> - %% Converting disc node to ram - mnesia:stop(), - move_db(), - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), - ok = create_schema(false); - {[], false, false} -> + {[], _, false} -> %% New ram node; start from scratch ok = create_schema(false); {[], false, true} -> -- cgit v1.2.1 From ebe79e0a0fd844066b73d8e41b7930d6d52edd0e Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 16:48:17 +0100 Subject: remove impossible case --- src/rabbit_mnesia.erl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 2f1e150a..95ce31cf 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -645,7 +645,6 @@ create_tables(OnDisk) -> TabDef1 = proplists:delete(match, TabDef), case mnesia:create_table(Tab, TabDef1) of {atomic, ok} -> ok; - {aborted, {already_exists, Tab}} -> ok; {aborted, Reason} -> throw({error, {table_creation_failed, Tab, TabDef1, Reason}}) -- cgit v1.2.1 From f683b860dc7c39473f1f47f490cdbcba62709c38 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 17:10:58 +0100 Subject: don't wipe message store and lengthier comment --- src/rabbit_mnesia.erl | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 95ce31cf..27672874 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -120,19 +120,30 @@ cluster(ClusterNodes, Force) -> ensure_mnesia_not_running(), ensure_mnesia_dir(), - %% Reset the node if we've just changed node type + %% Wipe mnesia if we're changing type from disc to ram case {is_disc_node(), should_be_disc_node(ClusterNodes)} of {true, false} -> error_logger:warning_msg( - "changing 
node type; backing up db and " - "resetting...~n"), - ok = move_db(), - mnesia:stop(), - reset(); + "changing node type; wiping mnesia...~n"), + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema); _ -> ok end, - %% Pre-emtively leave the cluster (in case we had been part of it - %% and force_reseted) + %% Pre-emtively leave the cluster + %% + %% We're trying to handle the following two cases: + %% 1. We have a two-node cluster, where both nodes are disc nodes. + %% One node is re-clustered as a ram node. When it tries to + %% re-join the cluster, but before it has time to update its + %% tables definitions, the other node will order it to re-create + %% its disc tables. So, we need to leave the cluster before we + %% can join it again. + %% 2. We have a two-node cluster, where both nodes are disc nodes. + %% One node is forcefully reset (so, the other node thinks its + %% still a part of the cluster). The reset node is re-clustered + %% as a ram node. Same as above, we need to leave the cluster + %% before we can join it. But, since we don't know if we're in a + %% cluster or not, we just pre-emptively leave it before joining. ProperClusterNodes = ClusterNodes -- [node()], try leave_cluster(ProperClusterNodes, ProperClusterNodes) of ok -> ok -- cgit v1.2.1 From cb5cb8067aff39d52a672e8fc0910a6cc7a4373a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 17:19:01 +0100 Subject: bring branch closer to default --- src/rabbit_mnesia.erl | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 27672874..4d5a6e71 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -515,12 +515,10 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> ok = create_schema(true); {[], true, true} -> %% We're the first node up - ok = case rabbit_upgrade:maybe_upgrade_local() of - ok -> - ensure_schema_integrity(); - version_not_available -> - schema_ok_or_move() - end; + case rabbit_upgrade:maybe_upgrade_local() of + ok -> ensure_schema_integrity(); + version_not_available -> ok = schema_ok_or_move() + end; {[AnotherNode|_], _, _} -> %% Subsequent node in cluster, catch up ensure_version_ok( @@ -538,7 +536,8 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> %% We've taken down mnesia, so ram nodes will need %% to re-sync case is_disc_node() of - false -> mnesia:start(), + false -> rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), ensure_mnesia_running(), mnesia:change_config(extra_db_nodes, ProperClusterNodes), -- cgit v1.2.1 From 591f2bf86d5eb9ce6fbe962251b0873e68aea6d3 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 17:47:22 +0100 Subject: remove the disc schema for stand-alone ram nodes --- src/rabbit_mnesia.erl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4d5a6e71..846a12b1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -591,15 +591,17 @@ create_schema() -> create_schema(true). 
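%% Reminder for the clause below (documented mnesia behaviour): the schema can
%% only be deleted while mnesia is stopped on the node, e.g.
%%
%%   stopped = mnesia:stop(),
%%   ok      = mnesia:delete_schema([node()]),
%%
%% which is why create_schema/1 now calls mnesia:stop() unconditionally before
%% branching on OnDisk.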
create_schema(OnDisk) -> + mnesia:stop(), if OnDisk -> - mnesia:stop(), rabbit_misc:ensure_ok(mnesia:create_schema([node()]), - cannot_create_schema), - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia); + cannot_create_schema); true -> - ok + %% remove the disc schema since this is a ram node + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema) end, + rabbit_misc:ensure_ok(mnesia:start(), + cannot_start_mnesia), ok = create_tables(OnDisk), ensure_schema_integrity(), ok = rabbit_version:record_desired(). -- cgit v1.2.1 From 634d471b7ba29e1942807bf0123aa98bd03108ce Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 17:51:01 +0100 Subject: don't use echo -e I don't want to risk breaking something with that right now. I'll test and file another bug if it's safe. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bc903147..d8ef058e 100644 --- a/Makefile +++ b/Makefile @@ -163,7 +163,7 @@ run-node: all run-tests: all OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \ - echo -e $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null + echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null start-background-node: $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \ -- cgit v1.2.1 From 0fc771bc246b22eae9a53dedf1362b9b405e59b0 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 17:54:08 +0100 Subject: remove starting/stopping improvements Moving to another branch... but I need the diff. --- src/rabbit_mnesia.erl | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 846a12b1..f151b7cf 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -85,9 +85,7 @@ status() -> no -> case all_clustered_nodes() of [] -> []; Nodes -> [{unknown, Nodes}] - end; - Reason when Reason =:= starting; Reason =:= stopping -> - exit({rabbit_busy, try_again_later}) + end end}, {running_nodes, running_clustered_nodes()}]. @@ -339,19 +337,13 @@ ensure_mnesia_dir() -> ensure_mnesia_running() -> case mnesia:system_info(is_running) of yes -> ok; - starting -> waiting_for(mnesia_running), - ensure_mnesia_running(); - Reason when Reason =:= no; Reason =:= stopping -> - throw({error, mnesia_not_running}) + no -> throw({error, mnesia_not_running}) end. ensure_mnesia_not_running() -> case mnesia:system_info(is_running) of no -> ok; - stopping -> waiting_for(mnesia_not_running), - ensure_mnesia_not_running(); - Reason when Reason =:= yes; Reason =:= starting -> - throw({error, mnesia_unexpectedly_running}) + yes -> throw({error, mnesia_unexpectedly_running}) end. 
waiting_for(Condition) -> -- cgit v1.2.1 From 1d1c4fb6a673dbd07a18bc3cf3a7962d2bf8e175 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 21 Jul 2011 18:07:19 +0100 Subject: cosmetic --- src/rabbit_mnesia.erl | 6 +++--- src/rabbit_tests.erl | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f151b7cf..b2369ec1 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -124,7 +124,7 @@ cluster(ClusterNodes, Force) -> "changing node type; wiping mnesia...~n"), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema); - _ -> ok + _ -> ok end, %% Pre-emtively leave the cluster @@ -633,8 +633,8 @@ new_backup_dir_name(MnesiaDir) -> Year, Month, Day, Hour, Minute, Second])), case filelib:is_file(BackupDir) of false -> BackupDir; - true -> waiting_for(new_backup_dir_name), - new_backup_dir_name(MnesiaDir) + true -> waiting_for(new_backup_dir_name), + new_backup_dir_name(MnesiaDir) end. copy_db(Destination) -> diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 0c1a04b2..e6802563 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1610,13 +1610,13 @@ clean_logs(Files, Suffix) -> assert_ram_node() -> case rabbit_mnesia:is_disc_node() of - true -> exit('not_ram_node'); + true -> exit('not_ram_node'); false -> ok end. assert_disc_node() -> case rabbit_mnesia:is_disc_node() of - true -> ok; + true -> ok; false -> exit('not_disc_node') end. -- cgit v1.2.1 From cf2afc3c03850c9ec5618b8888c926d100599f32 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 21 Jul 2011 18:08:49 +0100 Subject: Support optionally formatting the priority queue mailbox, and do so on the queue. --- src/gen_server2.erl | 23 ++++++++++++++--------- src/rabbit_amqqueue_process.erl | 13 +++++++++++++ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 43e0a8f5..d1b24714 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -1161,17 +1161,22 @@ format_status(Opt, StatusData) -> end, Header = lists:concat(["Status for generic server ", NameTag]), Log = sys:get_debug(log, Debug, []), - Specfic = - case erlang:function_exported(Mod, format_status, 2) of - true -> case catch Mod:format_status(Opt, [PDict, State]) of - {'EXIT', _} -> [{data, [{"State", State}]}]; - Else -> Else - end; - _ -> [{data, [{"State", State}]}] - end, + Specfic = callback_format_status(Opt, Mod, format_status, [PDict, State], + [{data, [{"State", State}]}]), + Messages = callback_format_status(Opt, Mod, format_priority_mailbox, Queue, + priority_queue:to_list(Queue)), [{header, Header}, {data, [{"Status", SysState}, {"Parent", Parent}, {"Logged events", Log}, - {"Queued messages", priority_queue:to_list(Queue)}]} | + {"Queued messages", Messages}]} | Specfic]. + +callback_format_status(Opt, Mod, FunName, Args, Default) -> + case erlang:function_exported(Mod, FunName, 2) of + true -> case catch Mod:FunName(Opt, Args) of + {'EXIT', _} -> Default; + Else -> Else + end; + _ -> Default + end. diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index fcd6cc24..60c1135e 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -35,6 +35,8 @@ -export([init_with_backing_queue_state/7]). +-export([format_priority_mailbox/2]). 
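%% Sketch of what the callback added below reports (values are invented for
%% illustration): for a short backlog the raw {Priority, Message} pairs, for a
%% long one only a per-priority tally, e.g.
%%
%%   {3,     [{8, Msg1}, {0, Msg2}, {0, Msg3}]}   %% =< 100 queued messages
%%   {25000, {[{0, 24317}, {8, 683}]}}            %% >  100 queued messages
%%
%% keeping the output of e.g. sys:get_status/1 readable for queues with very
%% large mailboxes.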
+ %% Queue's state -record(q, {q, exclusive_consumer, @@ -1162,3 +1164,14 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. + +format_priority_mailbox(_Opt, Mailbox) -> + Len = priority_queue:len(Mailbox), + case Len > 100 of + false -> {Len, priority_queue:to_list(Mailbox)}; + true -> {Len, {dict:to_list( + lists:foldl( + fun ({P, _V}, Counts) -> + dict:update_counter(P, 1, Counts) + end, dict:new(), priority_queue:to_list(Mailbox)))}} + end. -- cgit v1.2.1 From 1fbe4894892d45736e8c17c52f2ddaa3f347eda7 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 21 Jul 2011 18:17:42 +0100 Subject: factoring --- src/rabbit_amqqueue_process.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 60c1135e..f0db479f 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -1167,11 +1167,11 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, format_priority_mailbox(_Opt, Mailbox) -> Len = priority_queue:len(Mailbox), - case Len > 100 of - false -> {Len, priority_queue:to_list(Mailbox)}; - true -> {Len, {dict:to_list( + {Len, case Len > 100 of + false -> priority_queue:to_list(Mailbox); + true -> {dict:to_list( lists:foldl( fun ({P, _V}, Counts) -> dict:update_counter(P, 1, Counts) - end, dict:new(), priority_queue:to_list(Mailbox)))}} - end. + end, dict:new(), priority_queue:to_list(Mailbox)))} + end}. -- cgit v1.2.1 From fe978993dc3715ff327be2fe9f08b625be416b26 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 22 Jul 2011 11:05:22 +0100 Subject: Sp --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index b2369ec1..1253dc42 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -127,7 +127,7 @@ cluster(ClusterNodes, Force) -> _ -> ok end, - %% Pre-emtively leave the cluster + %% Pre-emptively leave the cluster %% %% We're trying to handle the following two cases: %% 1. We have a two-node cluster, where both nodes are disc nodes. -- cgit v1.2.1 From 0d9371de708f319a9e0f21b5ee1b4d763935e896 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 22 Jul 2011 13:28:04 +0100 Subject: un-misname waiting_for --- src/rabbit_mnesia.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 1253dc42..4ad0ea11 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -346,7 +346,7 @@ ensure_mnesia_not_running() -> yes -> throw({error, mnesia_unexpectedly_running}) end. -waiting_for(Condition) -> +wait_for(Condition) -> error_logger:info_msg("Waiting for ~p...~n", [Condition]), timer:sleep(1000). @@ -633,7 +633,7 @@ new_backup_dir_name(MnesiaDir) -> Year, Month, Day, Hour, Minute, Second])), case filelib:is_file(BackupDir) of false -> BackupDir; - true -> waiting_for(new_backup_dir_name), + true -> wait_for(new_backup_dir_name), new_backup_dir_name(MnesiaDir) end. -- cgit v1.2.1 From 713ec3c21c5efed81f8c3d657e1ebae0cba2278b Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 22 Jul 2011 13:33:58 +0100 Subject: rename IsDiskNode, WasDiskNode My general rule for spelling these things was: if it's followed by node, it should be disc; otherwise, it should be disk. So, is_disc_node vs OnDisk. I managed to miss those two, though. 
--- src/rabbit_mnesia.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 4ad0ea11..05d0f384 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -481,8 +481,6 @@ delete_previously_running_nodes() -> init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> UClusterNodes = lists:usort(ClusterNodes), ProperClusterNodes = UClusterNodes -- [node()], - IsDiskNode = should_be_disc_node(ClusterNodes), - WasDiskNode = is_disc_node(), case mnesia:change_config(extra_db_nodes, ProperClusterNodes) of {ok, Nodes} -> case Force of @@ -496,9 +494,11 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> end; true -> ok end, + WantDiscNode = should_be_disc_node(ClusterNodes), + WasDiscNode = is_disc_node(), %% We create a new db (on disk, or in ram) in the first %% two cases and attempt to upgrade the in the other two - case {Nodes, WasDiskNode, IsDiskNode} of + case {Nodes, WasDiscNode, WantDiscNode} of {[], _, false} -> %% New ram node; start from scratch ok = create_schema(false); @@ -516,7 +516,7 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> ensure_version_ok( rpc:call(AnotherNode, rabbit_version, recorded, [])), {CopyType, CopyTypeAlt} = - case IsDiskNode of + case WantDiscNode of true -> {disc, disc_copies}; false -> {ram, ram_copies} end, -- cgit v1.2.1 From 89658c418ac65e97aae012dc2b379fc28b092167 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 22 Jul 2011 13:37:20 +0100 Subject: more spacing --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 05d0f384..f94db8af 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -121,7 +121,7 @@ cluster(ClusterNodes, Force) -> %% Wipe mnesia if we're changing type from disc to ram case {is_disc_node(), should_be_disc_node(ClusterNodes)} of {true, false} -> error_logger:warning_msg( - "changing node type; wiping mnesia...~n"), + "changing node type; wiping mnesia...~n~n"), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema); _ -> ok -- cgit v1.2.1 From 18539d5367da128fcd48909ee7414a57e20a44a1 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 22 Jul 2011 16:18:58 +0100 Subject: use mnesia:system_config(use_dir) --- src/rabbit_mnesia.erl | 2 +- src/rabbit_upgrade.erl | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index f94db8af..9d1fe423 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -602,7 +602,7 @@ is_disc_node() -> %% This is pretty ugly but we can't start Mnesia and ask it (will hang), %% we can't look at the config file (may not include us even if we're a %% disc node). - filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). + mnesia:system_info(use_dir). should_be_disc_node(ClusterNodes) -> ClusterNodes == [] orelse lists:member(node(), ClusterNodes). 
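Taken together, is_disc_node/0 reports what the node currently is, while should_be_disc_node/1 reports what the supplied cluster config says it ought to be. A minimal sketch of the decision the clustering code makes from that pair; the helper name is illustrative and not part of the patch, and the actions are paraphrased from the cluster/2 and init_db/3 code shown earlier in this series:

    %% Illustrative helper only -- not part of the patch.
    node_type_action(ClusterNodes) ->
        case {is_disc_node(), should_be_disc_node(ClusterNodes)} of
            {true, false} -> wipe_schema;    %% disc -> ram: delete the local schema
            {false, true} -> create_schema;  %% ram -> disc: recreate it on disk
            _             -> no_change
        end.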
diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index e6b52c61..a6d04f7e 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -144,7 +144,7 @@ upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previously_running_nodes(), - case {rabbit_mnesia:is_disc_node(), AfterUs} of + case {is_disc_node(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -212,7 +212,7 @@ force_tables() -> secondary_upgrade(AllNodes) -> %% must do this before we wipe out schema - IsDiscNode = rabbit_mnesia:is_disc_node(), + IsDiscNode = is_disc_node(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), %% Note that we cluster with all nodes, rather than all disc nodes @@ -276,6 +276,14 @@ lock_filename() -> lock_filename(dir()). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). backup_dir() -> dir() ++ "-upgrade-backup". +is_disc_node() -> + %% This is pretty ugly but we can't start Mnesia and ask it (will + %% hang), we can't look at the config file (may not include us + %% even if we're a disc node). We also can't use + %% rabbit_mnesia:is_disc_node/0 because that will give false + %% postivies on Rabbit up to 2.5.1. + filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")). + %% NB: we cannot use rabbit_log here since it may not have been %% started yet info(Msg, Args) -> error_logger:info_msg(Msg, Args). -- cgit v1.2.1 From 200cf220a5dbf843b1d4287a325106448c4a201f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Fri, 22 Jul 2011 17:46:23 +0100 Subject: Convert dict to orddict --- src/rabbit_limiter.erl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index e79583fa..b80ad6cc 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -49,7 +49,7 @@ -record(lim, {prefetch_count = 0, ch_pid, blocked = false, - queues = dict:new(), % QPid -> {MonitorRef, Notify} + queues = orddict:new(), % QPid -> {MonitorRef, Notify} volume = 0}). %% 'Notify' is a boolean that indicates whether a queue should be %% notified of a change in the limit or volume that may allow it to @@ -196,30 +196,30 @@ limit_reached(#lim{prefetch_count = Limit, volume = Volume}) -> blocked(#lim{blocked = Blocked}) -> Blocked. remember_queue(QPid, State = #lim{queues = Queues}) -> - case dict:is_key(QPid, Queues) of + case orddict:is_key(QPid, Queues) of false -> MRef = erlang:monitor(process, QPid), - State#lim{queues = dict:store(QPid, {MRef, false}, Queues)}; + State#lim{queues = orddict:store(QPid, {MRef, false}, Queues)}; true -> State end. forget_queue(QPid, State = #lim{ch_pid = ChPid, queues = Queues}) -> - case dict:find(QPid, Queues) of + case orddict:find(QPid, Queues) of {ok, {MRef, _}} -> true = erlang:demonitor(MRef), ok = rabbit_amqqueue:unblock(QPid, ChPid), - State#lim{queues = dict:erase(QPid, Queues)}; + State#lim{queues = orddict:erase(QPid, Queues)}; error -> State end. limit_queue(QPid, State = #lim{queues = Queues}) -> UpdateFun = fun ({MRef, _}) -> {MRef, true} end, - State#lim{queues = dict:update(QPid, UpdateFun, Queues)}. + State#lim{queues = orddict:update(QPid, UpdateFun, Queues)}. 
notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) -> {QList, NewQueues} = - dict:fold(fun (_QPid, {_, false}, Acc) -> Acc; + orddict:fold(fun (_QPid, {_, false}, Acc) -> Acc; (QPid, {MRef, true}, {L, D}) -> - {[QPid | L], dict:store(QPid, {MRef, false}, D)} + {[QPid | L], orddict:store(QPid, {MRef, false}, D)} end, {[], Queues}, Queues), case length(QList) of 0 -> ok; -- cgit v1.2.1 From 2cc159da0cb509ae51ad3b47f786abc3d2e86250 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Mon, 25 Jul 2011 16:07:24 +0100 Subject: improve comments --- src/rabbit_mnesia.erl | 3 --- src/rabbit_upgrade.erl | 6 +++--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 9d1fe423..96eedf3c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -599,9 +599,6 @@ create_schema(OnDisk) -> ok = rabbit_version:record_desired(). is_disc_node() -> - %% This is pretty ugly but we can't start Mnesia and ask it (will hang), - %% we can't look at the config file (may not include us even if we're a - %% disc node). mnesia:system_info(use_dir). should_be_disc_node(ClusterNodes) -> diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl index a6d04f7e..9739f6b7 100644 --- a/src/rabbit_upgrade.erl +++ b/src/rabbit_upgrade.erl @@ -144,7 +144,7 @@ upgrade_mode(AllNodes) -> case nodes_running(AllNodes) of [] -> AfterUs = rabbit_mnesia:read_previously_running_nodes(), - case {is_disc_node(), AfterUs} of + case {is_disc_node_legacy(), AfterUs} of {true, []} -> primary; {true, _} -> @@ -212,7 +212,7 @@ force_tables() -> secondary_upgrade(AllNodes) -> %% must do this before we wipe out schema - IsDiscNode = is_disc_node(), + IsDiscNode = is_disc_node_legacy(), rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema), %% Note that we cluster with all nodes, rather than all disc nodes @@ -276,7 +276,7 @@ lock_filename() -> lock_filename(dir()). lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME). backup_dir() -> dir() ++ "-upgrade-backup". -is_disc_node() -> +is_disc_node_legacy() -> %% This is pretty ugly but we can't start Mnesia and ask it (will %% hang), we can't look at the config file (may not include us %% even if we're a disc node). We also can't use -- cgit v1.2.1 From 909225b6582cbf4248c6698006a97ffe59cad930 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 26 Jul 2011 11:39:16 +0100 Subject: cosmetic and refactoring --- src/rabbit_mnesia.erl | 71 +++++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 39 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 96eedf3c..a07cf960 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -143,13 +143,11 @@ cluster(ClusterNodes, Force) -> %% before we can join it. But, since we don't know if we're in a %% cluster or not, we just pre-emptively leave it before joining. ProperClusterNodes = ClusterNodes -- [node()], - try leave_cluster(ProperClusterNodes, ProperClusterNodes) of - ok -> ok + try + ok = leave_cluster(ProperClusterNodes, ProperClusterNodes) catch - throw:({error, {no_running_cluster_nodes, _, _}} = E) -> - if Force -> ok; - true -> throw(E) - end + {error, {no_running_cluster_nodes, _, _}} when Force -> + ok end, %% Join the cluster @@ -201,17 +199,14 @@ nodes_of_type(Type) -> %% RAM. mnesia:table_info(rabbit_durable_exchange, Type). -table_definitions() -> - table_definitions(is_disc_node()). 
- %% The tables aren't supposed to be on disk on a ram node -table_definitions(true) -> - real_table_definitions(); -table_definitions(false) -> +table_definitions(disc) -> + table_definitions(); +table_definitions(ram) -> [{Tab, copy_type_to_ram(TabDef)} - || {Tab, TabDef} <- real_table_definitions()]. + || {Tab, TabDef} <- table_definitions()]. -real_table_definitions() -> +table_definitions() -> [{rabbit_user, [{record_name, internal_user}, {attributes, record_info(fields, internal_user)}, @@ -316,10 +311,10 @@ resource_match(Kind) -> #resource{kind = Kind, _='_'}. table_names() -> - [Tab || {Tab, _} <- real_table_definitions()]. + [Tab || {Tab, _} <- table_definitions()]. replicated_table_names() -> - [Tab || {Tab, TabDef} <- real_table_definitions(), + [Tab || {Tab, TabDef} <- table_definitions(), not lists:member({local_content, true}, TabDef) ]. @@ -393,7 +388,11 @@ check_table_content(Tab, TabDef) -> end. check_tables(Fun) -> - case [Error || {Tab, TabDef} <- table_definitions(), + case [Error || {Tab, TabDef} <- table_definitions( + case is_disc_node() of + true -> disc; + false -> ram + end), case Fun(Tab, TabDef) of ok -> Error = none, false; {error, Error} -> true @@ -501,10 +500,10 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> case {Nodes, WasDiscNode, WantDiscNode} of {[], _, false} -> %% New ram node; start from scratch - ok = create_schema(false); + ok = create_schema(ram); {[], false, true} -> %% Nothing there at all, start from scratch - ok = create_schema(true); + ok = create_schema(disc); {[], true, true} -> %% We're the first node up case rabbit_upgrade:maybe_upgrade_local() of @@ -567,7 +566,7 @@ schema_ok_or_move() -> "and recreating schema from scratch~n", [Reason]), ok = move_db(), - ok = create_schema() + ok = create_schema(disc) end. ensure_version_ok({ok, DiscVersion}) -> @@ -579,27 +578,22 @@ ensure_version_ok({ok, DiscVersion}) -> ensure_version_ok({error, _}) -> ok = rabbit_version:record_desired(). -create_schema() -> - create_schema(true). - -create_schema(OnDisk) -> +create_schema(Type) -> mnesia:stop(), - if OnDisk -> - rabbit_misc:ensure_ok(mnesia:create_schema([node()]), - cannot_create_schema); - true -> - %% remove the disc schema since this is a ram node - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema) + case Type of + disc -> rabbit_misc:ensure_ok(mnesia:create_schema([node()]), + cannot_create_schema); + ram -> %% remove the disc schema since this is a ram node + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema) end, rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), - ok = create_tables(OnDisk), + ok = create_tables(Type), ensure_schema_integrity(), ok = rabbit_version:record_desired(). -is_disc_node() -> - mnesia:system_info(use_dir). +is_disc_node() -> mnesia:system_info(use_dir). should_be_disc_node(ClusterNodes) -> ClusterNodes == [] orelse lists:member(node(), ClusterNodes). @@ -638,10 +632,9 @@ copy_db(Destination) -> ok = ensure_mnesia_not_running(), rabbit_misc:recursive_copy(dir(), Destination). -create_tables() -> - create_tables(true). +create_tables() -> create_tables(disc). -create_tables(OnDisk) -> +create_tables(Type) -> lists:foreach(fun ({Tab, TabDef}) -> TabDef1 = proplists:delete(match, TabDef), case mnesia:create_table(Tab, TabDef1) of @@ -651,7 +644,7 @@ create_tables(OnDisk) -> Tab, TabDef1, Reason}}) end end, - table_definitions(OnDisk)), + table_definitions(Type)), ok. 
copy_type_to_ram(TabDef) -> @@ -687,7 +680,7 @@ create_local_table_copies(Type) -> end, ok = create_local_table_copy(Tab, StorageType) end, - table_definitions(Type =:= disc)), + table_definitions(disc)), ok. create_local_table_copy(Tab, Type) -> -- cgit v1.2.1 From f02231ba2daabfd00853e39a0a5f616a0aa23f2b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 26 Jul 2011 12:00:51 +0100 Subject: Cosmetic --- src/rabbit_mnesia.erl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a07cf960..80495375 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -583,12 +583,11 @@ create_schema(Type) -> case Type of disc -> rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema); - ram -> %% remove the disc schema since this is a ram node - rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), - cannot_delete_schema) + ram -> %% remove the disc schema since this is a ram node + rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), + cannot_delete_schema) end, - rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok = create_tables(Type), ensure_schema_integrity(), ok = rabbit_version:record_desired(). -- cgit v1.2.1 From 4f997aed64f0ea7681d09fc767ecdf0aa5d6aa7a Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Tue, 26 Jul 2011 12:03:35 +0100 Subject: Cosmetic --- src/rabbit_mnesia.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 80495375..7865acbb 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -203,8 +203,7 @@ nodes_of_type(Type) -> table_definitions(disc) -> table_definitions(); table_definitions(ram) -> - [{Tab, copy_type_to_ram(TabDef)} - || {Tab, TabDef} <- table_definitions()]. + [{Tab, copy_type_to_ram(TabDef)} || {Tab, TabDef} <- table_definitions()]. table_definitions() -> [{rabbit_user, -- cgit v1.2.1 From 424055928eb3abb91692658437028a5f9a0ff8ce Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 26 Jul 2011 12:20:25 +0100 Subject: oops --- src/rabbit_mnesia.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index a07cf960..2856af1c 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -680,7 +680,7 @@ create_local_table_copies(Type) -> end, ok = create_local_table_copy(Tab, StorageType) end, - table_definitions(disc)), + table_definitions(Type)), ok. create_local_table_copy(Tab, Type) -> -- cgit v1.2.1 From 81e28d508e8fe0414f523b5f02f3cfd00e3e7899 Mon Sep 17 00:00:00 2001 From: Emile Joubert Date: Tue, 26 Jul 2011 14:09:33 +0100 Subject: More consistent use of macro --- src/rabbit_backing_queue_qc.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index e372e351..e20bba07 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -73,7 +73,7 @@ backing_queue_test(Cmds) -> application:set_env(rabbit, queue_index_max_journal_entries, MaxJournal, infinity), - rabbit_variable_queue:delete_and_terminate(shutdown, BQ), + ?BQMOD:delete_and_terminate(shutdown, BQ), ?WHENFAIL( io:format("Result: ~p~n", [Res]), aggregate(command_names(Cmds), Res =:= ok)). 
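The ?BQMOD macro itself is not defined in this hunk; judging by the calls it replaces, it presumably names the backing-queue implementation the QuickCheck suite exercises, roughly:

    %% Assumed definition (not shown in this diff):
    -define(BQMOD, rabbit_variable_queue).
    %% so ?BQMOD:needs_timeout(BQ) expands to rabbit_variable_queue:needs_timeout(BQ)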
@@ -329,9 +329,9 @@ publish_multiple(Msg, MsgProps, BQ, Count) -> timeout(BQ, 0) -> BQ; timeout(BQ, AtMost) -> - case rabbit_variable_queue:needs_timeout(BQ) of + case ?BQMOD:needs_timeout(BQ) of false -> BQ; - _ -> timeout(rabbit_variable_queue:timeout(BQ), AtMost - 1) + _ -> timeout(?BQMOD:timeout(BQ), AtMost - 1) end. qc_message_payload() -> -- cgit v1.2.1 From 4bd844b22f15ab1d73914a7210622a87b32a40df Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 26 Jul 2011 15:56:48 +0100 Subject: Convert O(N*M) to O(M*log_2(N)) --- src/rabbit_backing_queue_qc.erl | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl index e20bba07..93e8efad 100644 --- a/src/rabbit_backing_queue_qc.erl +++ b/src/rabbit_backing_queue_qc.erl @@ -234,9 +234,7 @@ next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) -> #state{acks = AcksState} = S, BQ1 = {call, erlang, element, [2, Res]}, S#state{bqstate = BQ1, - acks = orddict:filter(fun (AckTag, _) -> - not lists:member(AckTag, AcksArg) - end, AcksState)}; + acks = lists:foldl(fun orddict:erase/2, AcksState, AcksArg)}; next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> #state{len = Len, messages = Messages, acks = AcksState} = S, @@ -246,9 +244,7 @@ next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _F, _V]}) -> S#state{bqstate = BQ1, len = Len + length(RequeueMsgs), messages = queue:join(Messages, queue:from_list(RequeueMsgs)), - acks = orddict:filter(fun (AckTag, _) -> - not lists:member(AckTag, AcksArg) - end, AcksState)}; + acks = lists:foldl(fun orddict:erase/2, AcksState, AcksArg)}; next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) -> S#state{bqstate = BQ}; -- cgit v1.2.1 From 4f0f940bbd406e36b9e6c1663cdfcda6704926db Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Tue, 26 Jul 2011 16:53:03 +0100 Subject: cosmetic --- src/rabbit_limiter.erl | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl index b80ad6cc..8f9ab032 100644 --- a/src/rabbit_limiter.erl +++ b/src/rabbit_limiter.erl @@ -204,11 +204,10 @@ remember_queue(QPid, State = #lim{queues = Queues}) -> forget_queue(QPid, State = #lim{ch_pid = ChPid, queues = Queues}) -> case orddict:find(QPid, Queues) of - {ok, {MRef, _}} -> - true = erlang:demonitor(MRef), - ok = rabbit_amqqueue:unblock(QPid, ChPid), - State#lim{queues = orddict:erase(QPid, Queues)}; - error -> State + {ok, {MRef, _}} -> true = erlang:demonitor(MRef), + ok = rabbit_amqqueue:unblock(QPid, ChPid), + State#lim{queues = orddict:erase(QPid, Queues)}; + error -> State end. 
limit_queue(QPid, State = #lim{queues = Queues}) -> @@ -218,9 +217,9 @@ limit_queue(QPid, State = #lim{queues = Queues}) -> notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) -> {QList, NewQueues} = orddict:fold(fun (_QPid, {_, false}, Acc) -> Acc; - (QPid, {MRef, true}, {L, D}) -> - {[QPid | L], orddict:store(QPid, {MRef, false}, D)} - end, {[], Queues}, Queues), + (QPid, {MRef, true}, {L, D}) -> + {[QPid | L], orddict:store(QPid, {MRef, false}, D)} + end, {[], Queues}, Queues), case length(QList) of 0 -> ok; L -> -- cgit v1.2.1 From 25a517a4f338ec44b9182e363758eab034d8950d Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 26 Jul 2011 16:57:06 +0100 Subject: +documentation, minor renaming, clarity --- src/gen_server2.erl | 7 ++++++- src/rabbit_amqqueue_process.erl | 7 ++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index d1b24714..5bf28477 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -67,6 +67,11 @@ %% module. Note there is no form also encompassing a reply, thus if %% you wish to reply in handle_call/3 and change the callback module, %% you need to use gen_server2:reply/2 to issue the reply manually. +%% +%% 8) The callback module can optionally implement +%% format_message_queue/2 which is the equivalent of format_status/2 +%% but where the second argument is specifically the priority_queue +%% which contains the prioritised message_queue. %% All modifications are (C) 2009-2011 VMware, Inc. @@ -1163,7 +1168,7 @@ format_status(Opt, StatusData) -> Log = sys:get_debug(log, Debug, []), Specfic = callback_format_status(Opt, Mod, format_status, [PDict, State], [{data, [{"State", State}]}]), - Messages = callback_format_status(Opt, Mod, format_priority_mailbox, Queue, + Messages = callback_format_status(Opt, Mod, format_message_queue, Queue, priority_queue:to_list(Queue)), [{header, Header}, {data, [{"Status", SysState}, diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index f0db479f..4b144ff4 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -35,7 +35,7 @@ -export([init_with_backing_queue_state/7]). --export([format_priority_mailbox/2]). +-export([format_message_queue/2]). %% Queue's state -record(q, {q, @@ -1165,11 +1165,12 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. 
-format_priority_mailbox(_Opt, Mailbox) -> +format_message_queue(_Opt, Mailbox) -> Len = priority_queue:len(Mailbox), {Len, case Len > 100 of false -> priority_queue:to_list(Mailbox); - true -> {dict:to_list( + true -> {summary, + dict:to_list( lists:foldl( fun ({P, _V}, Counts) -> dict:update_counter(P, 1, Counts) -- cgit v1.2.1 From 873fbc8c8c910a847834ae869a89a19d5074e21f Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 26 Jul 2011 17:01:51 +0100 Subject: dict => orddict --- src/rabbit_amqqueue_process.erl | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 4b144ff4..4492bbd8 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -1167,12 +1167,13 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, format_message_queue(_Opt, Mailbox) -> Len = priority_queue:len(Mailbox), - {Len, case Len > 100 of - false -> priority_queue:to_list(Mailbox); - true -> {summary, - dict:to_list( - lists:foldl( - fun ({P, _V}, Counts) -> - dict:update_counter(P, 1, Counts) - end, dict:new(), priority_queue:to_list(Mailbox)))} - end}. + {Len, + case Len > 100 of + false -> priority_queue:to_list(Mailbox); + true -> {summary, + orddict:to_list( + lists:foldl( + fun ({P, _V}, Counts) -> + orddict:update_counter(P, 1, Counts) + end, orddict:new(), priority_queue:to_list(Mailbox)))} + end}. -- cgit v1.2.1 From 9433b14ae3f0e864895b0b02837b7598cadfe32c Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 26 Jul 2011 17:24:59 +0100 Subject: Move ha boot steps to somewhere more appropriate --- src/rabbit.erl | 12 ++++++++++++ src/rabbit_mirror_queue_slave_sup.erl | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rabbit.erl b/src/rabbit.erl index 6ef816c0..e067607d 100644 --- a/src/rabbit.erl +++ b/src/rabbit.erl @@ -134,6 +134,18 @@ {requires, empty_db_check}, {enables, routing_ready}]}). +-rabbit_boot_step({mirror_queue_slave_sup, + [{description, "mirror queue slave sup"}, + {mfa, {rabbit_mirror_queue_slave_sup, start, []}}, + {requires, recovery}, + {enables, routing_ready}]}). + +-rabbit_boot_step({mirrored_queues, + [{description, "adding mirrors to queues"}, + {mfa, {rabbit_mirror_queue_misc, on_node_up, []}}, + {requires, mirror_queue_slave_sup}, + {enables, routing_ready}]}). + -rabbit_boot_step({routing_ready, [{description, "message delivery logic ready"}, {requires, core_initialized}]}). diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl index 879a6017..fc04ec79 100644 --- a/src/rabbit_mirror_queue_slave_sup.erl +++ b/src/rabbit_mirror_queue_slave_sup.erl @@ -16,18 +16,6 @@ -module(rabbit_mirror_queue_slave_sup). --rabbit_boot_step({mirror_queue_slave_sup, - [{description, "mirror queue slave sup"}, - {mfa, {rabbit_mirror_queue_slave_sup, start, []}}, - {requires, recovery}, - {enables, routing_ready}]}). - --rabbit_boot_step({mirrored_queues, - [{description, "adding mirrors to queues"}, - {mfa, {rabbit_mirror_queue_misc, on_node_up, []}}, - {requires, mirror_queue_slave_sup}, - {enables, routing_ready}]}). - -behaviour(supervisor2). -export([start/0, start_link/0, start_child/2]). -- cgit v1.2.1 From a3bc422fa3e94686025d651f9326bb90c46f5158 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Tue, 26 Jul 2011 18:39:12 +0100 Subject: Wipe out slave pids whenever we start up a queue. 
--- src/rabbit_amqqueue.erl | 3 ++- src/rabbit_mirror_queue_slave.erl | 5 ++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index e9d01d12..d6e10471 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -254,7 +254,8 @@ determine_queue_nodes(Args) -> end. start_queue_process(Node, Q) -> - {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]), + {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, + [Q#amqqueue{slave_pids = []}]), Q#amqqueue{pid = Pid}. add_default_binding(#amqqueue{name = QueueName}) -> diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index b38a8967..84efb7ca 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -89,9 +89,8 @@ init([#amqqueue { name = QueueName } = Q]) -> %% ASSERTION [] = [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node], MPids1 = MPids ++ [Self], - mnesia:write(rabbit_queue, - Q1 #amqqueue { slave_pids = MPids1 }, - write), + ok = rabbit_amqqueue:store_queue( + Q1 #amqqueue { slave_pids = MPids1 }), {ok, QPid} end), erlang:monitor(process, MPid), -- cgit v1.2.1 From 9a524e52f8fc8e79c27b765b31433127bc780f0e Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 27 Jul 2011 12:43:56 +0100 Subject: It has been decided that masking out on write, is preferable to masking out on read. --- src/rabbit_amqqueue.erl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index d6e10471..1829d09b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -230,7 +230,7 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) -> end). store_queue(Q = #amqqueue{durable = true}) -> - ok = mnesia:write(rabbit_durable_queue, Q, write), + ok = mnesia:write(rabbit_durable_queue, Q#amqqueue{slave_pids = []}, write), ok = mnesia:write(rabbit_queue, Q, write), ok; store_queue(Q = #amqqueue{durable = false}) -> @@ -254,8 +254,7 @@ determine_queue_nodes(Args) -> end. start_queue_process(Node, Q) -> - {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, - [Q#amqqueue{slave_pids = []}]), + {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]), Q#amqqueue{pid = Pid}. add_default_binding(#amqqueue{name = QueueName}) -> -- cgit v1.2.1 From 8b59064f8d092871fa3ca504a1bdaf58830e1e01 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Wed, 27 Jul 2011 13:12:11 +0100 Subject: General improvements --- src/gen_server2.erl | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 5bf28477..60471181 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -1166,10 +1166,10 @@ format_status(Opt, StatusData) -> end, Header = lists:concat(["Status for generic server ", NameTag]), Log = sys:get_debug(log, Debug, []), - Specfic = callback_format_status(Opt, Mod, format_status, [PDict, State], - [{data, [{"State", State}]}]), - Messages = callback_format_status(Opt, Mod, format_message_queue, Queue, - priority_queue:to_list(Queue)), + Specfic = callback(Mod, format_status, [Opt, [PDict, State]], + fun () -> [{data, [{"State", State}]}] end), + Messages = callback(Mod, format_message_queue, [Opt, Queue], + fun () -> priority_queue:to_list(Queue) end), [{header, Header}, {data, [{"Status", SysState}, {"Parent", Parent}, @@ -1177,11 +1177,11 @@ format_status(Opt, StatusData) -> {"Queued messages", Messages}]} | Specfic]. 
-callback_format_status(Opt, Mod, FunName, Args, Default) -> - case erlang:function_exported(Mod, FunName, 2) of - true -> case catch Mod:FunName(Opt, Args) of - {'EXIT', _} -> Default; - Else -> Else - end; - _ -> Default +callback(Mod, FunName, Args, DefaultThunk) -> + case erlang:function_exported(Mod, FunName, length(Args)) of + true -> case catch apply(Mod, FunName, Args) of + {'EXIT', _} -> DefaultThunk(); + Success -> Success + end; + false -> DefaultThunk() end. -- cgit v1.2.1 From e32bf003b8a6a0bc9f65536b0b111d72f536d439 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 27 Jul 2011 15:20:03 +0100 Subject: remove some magic --- src/rabbit_mnesia.erl | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 16bdc7f9..1081f0cb 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -194,10 +194,9 @@ nodes_of_type(Type) -> %% This function should return the nodes of a certain type (ram, %% disc or disc_only) in the current cluster. The type of nodes %% is determined when the cluster is initially configured. - %% Specifically, we check whether a certain table, which we know - %% will be written to disk on a disc node, is stored on disk or in - %% RAM. - mnesia:table_info(rabbit_durable_exchange, Type). + %% Specifically, we check whether the schema, which we know will + %% be written to disk on a disc node, is stored on disk or in RAM. + mnesia:table_info(schema, Type). %% The tables aren't supposed to be on disk on a ram node table_definitions(disc) -> @@ -260,8 +259,6 @@ table_definitions() -> {type, ordered_set}, {match, #topic_trie_binding{trie_binding = trie_binding_match(), _='_'}}]}, - %% Consider the implications to nodes_of_type/1 before altering - %% the next entry. {rabbit_durable_exchange, [{record_name, exchange}, {attributes, record_info(fields, exchange)}, -- cgit v1.2.1 From 99e9d09a4ee6716652f68a4aeedad8e07c385f88 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 28 Jul 2011 13:03:11 +0100 Subject: refactor: use multiple heads instead of 'case' --- src/gen_server2.erl | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 60471181..dad04fa7 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -612,27 +612,19 @@ in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) -> GS2State #gs2_state { queue = priority_queue:in( Input, PI(Input, GS2State), Queue) }. -process_msg(Msg, - GS2State = #gs2_state { parent = Parent, - name = Name, - debug = Debug }) -> - case Msg of - {system, From, Req} -> - sys:handle_system_msg( - Req, From, Parent, ?MODULE, Debug, - GS2State); - %% gen_server puts Hib on the end as the 7th arg, but that - %% version of the function seems not to be documented so - %% leaving out for now. - {'EXIT', Parent, Reason} -> - terminate(Reason, Msg, GS2State); - _Msg when Debug =:= [] -> - handle_msg(Msg, GS2State); - _Msg -> - Debug1 = sys:handle_debug(Debug, fun print_event/3, - Name, {in, Msg}), - handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }) - end. 
+process_msg({system, From, Req}, + GS2State = #gs2_state { parent = Parent, debug = Debug }) -> + sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State); +process_msg({'EXIT', Parent, Reason} = Msg, + GS2State = #gs2_state { parent = Parent }) -> + %% gen_server puts Hib on the end as the 7th arg, but that version + %% of the fun seems not to be documented so leaving out for now. + terminate(Reason, Msg, GS2State); +process_msg(Msg, GS2State = #gs2_state { debug = [] }) -> + handle_msg(Msg, GS2State); +process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) -> + Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}), + handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }). %%% --------------------------------------------------- %%% Send/recive functions -- cgit v1.2.1 From 4a60226bc8b715c2d64cac16e7b59cc0cacafa71 Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 28 Jul 2011 14:45:06 +0100 Subject: Make priority_queue be able to have a concept of infinity as a priority --- src/priority_queue.erl | 37 ++++++++++++++++++++++++++++--------- src/rabbit_tests.erl | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 9 deletions(-) diff --git a/src/priority_queue.erl b/src/priority_queue.erl index 4a94b24b..ccc87cd5 100644 --- a/src/priority_queue.erl +++ b/src/priority_queue.erl @@ -47,7 +47,7 @@ -ifdef(use_specs). --type(priority() :: integer()). +-type(priority() :: integer() | 'infinity'). -type(squeue() :: {queue, [any()], [any()]}). -type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}). @@ -71,8 +71,9 @@ new() -> is_queue({queue, R, F}) when is_list(R), is_list(F) -> true; is_queue({pqueue, Queues}) when is_list(Queues) -> - lists:all(fun ({P, Q}) -> is_integer(P) andalso is_queue(Q) end, - Queues); + lists:all(fun ({infinity, Q}) -> is_queue(Q); + ({P, Q}) -> is_integer(P) andalso is_queue(Q) + end, Queues); is_queue(_) -> false. @@ -89,7 +90,12 @@ len({pqueue, Queues}) -> to_list({queue, In, Out}) when is_list(In), is_list(Out) -> [{0, V} || V <- Out ++ lists:reverse(In, [])]; to_list({pqueue, Queues}) -> - [{-P, V} || {P, Q} <- Queues, {0, V} <- to_list(Q)]. + [{P1, V} || {P, Q} <- Queues, + case P of + infinity -> P1 = P, true; + _ -> P1 = -P, true + end, + {0, V} <- to_list(Q)]. in(Item, Q) -> in(Item, 0, Q). @@ -103,12 +109,23 @@ in(X, Priority, _Q = {queue, [], []}) -> in(X, Priority, Q = {queue, _, _}) -> in(X, Priority, {pqueue, [{0, Q}]}); in(X, Priority, {pqueue, Queues}) -> - P = -Priority, + P = case Priority of + infinity -> Priority; + _ -> -Priority + end, {pqueue, case lists:keysearch(P, 1, Queues) of {value, {_, Q}} -> lists:keyreplace(P, 1, Queues, {P, in(X, Q)}); + false when P == infinity -> + [{P, {queue, [X], []}} | Queues]; false -> - lists:keysort(1, [{P, {queue, [X], []}} | Queues]) + case Queues of + [{infinity, InfQueue} | Queues1] -> + [{infinity, InfQueue} | + lists:keysort(1, [{P, {queue, [X], []}} | Queues1])]; + _ -> + lists:keysort(1, [{P, {queue, [X], []}} | Queues]) + end end}. 
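Numeric priorities are stored negated so that keysort/1 yields highest-first order, while infinity entries are pinned at the head of the pqueue list. A small worked example derived from the in/3 clauses above, shown only to illustrate the internal representation:

    %% Illustrative: numeric priorities are negated, infinity stays at the head.
    Q = priority_queue:in(a, 1, priority_queue:in(b, infinity, priority_queue:new())),
    %% Q =:= {pqueue, [{infinity, {queue, [b], []}},
    %%                 {-1,       {queue, [a], []}}]}
    [{infinity, b}, {1, a}] = priority_queue:to_list(Q).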
out({queue, [], []} = Q) -> @@ -141,7 +158,8 @@ join({queue, [], []}, B) -> join({queue, AIn, AOut}, {queue, BIn, BOut}) -> {queue, BIn, AOut ++ lists:reverse(AIn, BOut)}; join(A = {queue, _, _}, {pqueue, BPQ}) -> - {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, BPQ), + {Pre, Post} = + lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ), Post1 = case Post of [] -> [ {0, A} ]; [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ]; @@ -149,7 +167,8 @@ join(A = {queue, _, _}, {pqueue, BPQ}) -> end, {pqueue, Pre ++ Post1}; join({pqueue, APQ}, B = {queue, _, _}) -> - {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, APQ), + {Pre, Post} = + lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ), Post1 = case Post of [] -> [ {0, B} ]; [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ]; @@ -165,7 +184,7 @@ merge(APQ, [], Acc) -> lists:reverse(Acc, APQ); merge([{P, A}|As], [{P, B}|Bs], Acc) -> merge(As, Bs, [ {P, join(A, B)} | Acc ]); -merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB -> +merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity -> merge(As, Bs, [ {PA, A} | Acc ]); merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) -> merge(As, Bs, [ {PB, B} | Acc ]). diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 2e454411..283a5c4a 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -203,6 +203,42 @@ test_priority_queue() -> {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} = test_priority_queue(Q15), + %% 1-element infinity priority Q + Q16 = priority_queue:in(foo, infinity, Q), + {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16), + + %% add infinity to 0-priority Q + Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)), + {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q17), + + %% and the other way around + Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)), + {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} = + test_priority_queue(Q18), + + %% add infinity to mixed-priority Q + Q19 = priority_queue:in(qux, infinity, Q3), + {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} = + test_priority_queue(Q19), + + %% merge the above with a negative priority Q + Q20 = priority_queue:join(Q19, Q4), + {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}], + [qux, bar, foo, foo]} = test_priority_queue(Q20), + + %% merge two infinity priority queues + Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q), + priority_queue:in(bar, infinity, Q)), + {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} = + test_priority_queue(Q21), + + %% merge two mixed priority with infinity queues + Q22 = priority_queue:join(Q18, Q20), + {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo}, + {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} = + test_priority_queue(Q22), + passed. 
priority_queue_in_all(Q, L) -> -- cgit v1.2.1 From 0db8533141a584e8ed63ee8c6dc8ba6a41029b1b Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 28 Jul 2011 15:02:11 +0100 Subject: Minor refactor and introduce horrific inefficiency --- src/priority_queue.erl | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/priority_queue.erl b/src/priority_queue.erl index ccc87cd5..34787903 100644 --- a/src/priority_queue.erl +++ b/src/priority_queue.erl @@ -90,12 +90,8 @@ len({pqueue, Queues}) -> to_list({queue, In, Out}) when is_list(In), is_list(Out) -> [{0, V} || V <- Out ++ lists:reverse(In, [])]; to_list({pqueue, Queues}) -> - [{P1, V} || {P, Q} <- Queues, - case P of - infinity -> P1 = P, true; - _ -> P1 = -P, true - end, - {0, V} <- to_list(Q)]. + [{maybe_negate_priority(P), V} || {P, Q} <- Queues, + {0, V} <- to_list(Q)]. in(Item, Q) -> in(Item, 0, Q). @@ -109,10 +105,7 @@ in(X, Priority, _Q = {queue, [], []}) -> in(X, Priority, Q = {queue, _, _}) -> in(X, Priority, {pqueue, [{0, Q}]}); in(X, Priority, {pqueue, Queues}) -> - P = case Priority of - infinity -> Priority; - _ -> -Priority - end, + P = maybe_negate_priority(Priority), {pqueue, case lists:keysearch(P, 1, Queues) of {value, {_, Q}} -> lists:keyreplace(P, 1, Queues, {P, in(X, Q)}); @@ -193,3 +186,6 @@ r2f([]) -> {queue, [], []}; r2f([_] = R) -> {queue, [], R}; r2f([X,Y]) -> {queue, [X], [Y]}; r2f([X,Y|R]) -> {queue, [X,Y], lists:reverse(R, [])}. + +maybe_negate_priority(infinity) -> infinity; +maybe_negate_priority(P) -> -P. -- cgit v1.2.1 From bd6630d835fbaecf924d7feff979b8f6bc6eeadc Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 28 Jul 2011 15:02:52 +0100 Subject: Don't call prioritise_info for EXIT from parent. Use infinity priority for system msgs and EXIT from parent --- src/gen_server2.erl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index dad04fa7..d4d583a0 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -608,6 +608,13 @@ in({'$gen_call', From, Msg}, GS2State = #gs2_state { prioritise_call = PC, GS2State #gs2_state { queue = priority_queue:in( {'$gen_call', From, Msg}, PC(Msg, From, GS2State), Queue) }; +in({'EXIT', Parent, Reason}, + GS2State = #gs2_state { parent = Parent, queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in( + {'EXIT', Parent, Reason}, infinity, Queue) }; +in({system, From, Req}, GS2State = #gs2_state { queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in( + {system, From, Req}, infinity, Queue) }; in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) -> GS2State #gs2_state { queue = priority_queue:in( Input, PI(Input, GS2State), Queue) }. -- cgit v1.2.1 From 1d51127ac43821c56d4ce15169faa48f191e7eee Mon Sep 17 00:00:00 2001 From: Matthew Sackman Date: Thu, 28 Jul 2011 15:11:43 +0100 Subject: Don't unpack and repack --- src/gen_server2.erl | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index d4d583a0..64a3502c 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -598,23 +598,19 @@ adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO, CurrentTO1 = Base + Extra, {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}. 
-in({'$gen_cast', Msg}, GS2State = #gs2_state { prioritise_cast = PC, - queue = Queue }) -> +in({'$gen_cast', Msg} = Input, + GS2State = #gs2_state { prioritise_cast = PC, queue = Queue }) -> GS2State #gs2_state { queue = priority_queue:in( - {'$gen_cast', Msg}, - PC(Msg, GS2State), Queue) }; -in({'$gen_call', From, Msg}, GS2State = #gs2_state { prioritise_call = PC, - queue = Queue }) -> + Input, PC(Msg, GS2State), Queue) }; +in({'$gen_call', From, Msg} = Input, + GS2State = #gs2_state { prioritise_call = PC, queue = Queue }) -> GS2State #gs2_state { queue = priority_queue:in( - {'$gen_call', From, Msg}, - PC(Msg, From, GS2State), Queue) }; -in({'EXIT', Parent, Reason}, + Input, PC(Msg, From, GS2State), Queue) }; +in({'EXIT', Parent, _Reason} = Input, GS2State = #gs2_state { parent = Parent, queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in( - {'EXIT', Parent, Reason}, infinity, Queue) }; -in({system, From, Req}, GS2State = #gs2_state { queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in( - {system, From, Req}, infinity, Queue) }; + GS2State #gs2_state { queue = priority_queue:in(Input, infinity, Queue) }; +in({system, _From, _Req} = Input, GS2State = #gs2_state { queue = Queue }) -> + GS2State #gs2_state { queue = priority_queue:in(Input, infinity, Queue) }; in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) -> GS2State #gs2_state { queue = priority_queue:in( Input, PI(Input, GS2State), Queue) }. -- cgit v1.2.1 From eeb689c2cddf9c456ae6991905128e9a8ff47f06 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 28 Jul 2011 16:16:42 +0100 Subject: strip out the new backup naming scheme since it isn't necessary anymore --- src/rabbit_mnesia.erl | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 16bdc7f9..b52ad3f9 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -340,10 +340,6 @@ ensure_mnesia_not_running() -> yes -> throw({error, mnesia_unexpectedly_running}) end. -wait_for(Condition) -> - error_logger:info_msg("Waiting for ~p...~n", [Condition]), - timer:sleep(1000). - ensure_schema_integrity() -> case check_schema_integrity() of ok -> @@ -599,7 +595,11 @@ should_be_disc_node(ClusterNodes) -> move_db() -> mnesia:stop(), MnesiaDir = filename:dirname(dir() ++ "/"), - BackupDir = new_backup_dir_name(MnesiaDir), + {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(), + BackupDir = lists:flatten( + io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w", + [MnesiaDir, + Year, Month, Day, Hour, Minute, Second])), case file:rename(MnesiaDir, BackupDir) of ok -> %% NB: we cannot use rabbit_log here since it may not have @@ -614,18 +614,6 @@ move_db() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), ok. -new_backup_dir_name(MnesiaDir) -> - {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(), - BackupDir = lists:flatten( - io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w", - [MnesiaDir, - Year, Month, Day, Hour, Minute, Second])), - case filelib:is_file(BackupDir) of - false -> BackupDir; - true -> wait_for(new_backup_dir_name), - new_backup_dir_name(MnesiaDir) - end. - copy_db(Destination) -> ok = ensure_mnesia_not_running(), rabbit_misc:recursive_copy(dir(), Destination). 
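The retained naming scheme simply appends a zero-padded UTC timestamp to the mnesia directory, so only a rename within the same second could collide, and the retry loop guarding against that is dropped. A worked example of the format string; the directory and timestamp are made up:

    %% "~2..0w" pads each field to two digits with leading zeros.
    lists:flatten(io_lib:format("~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
                                ["/var/lib/rabbitmq/mnesia/rabbit",
                                 2011, 7, 29, 9, 56, 45])).
    %% => "/var/lib/rabbitmq/mnesia/rabbit_20110729095645"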
-- cgit v1.2.1 From 28e9f6d79feea807f0c35960c37f932a0c60e1bb Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 28 Jul 2011 16:26:55 +0100 Subject: refactor --- src/gen_server2.erl | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/gen_server2.erl b/src/gen_server2.erl index 64a3502c..35258139 100644 --- a/src/gen_server2.erl +++ b/src/gen_server2.erl @@ -599,21 +599,20 @@ adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO, {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}. in({'$gen_cast', Msg} = Input, - GS2State = #gs2_state { prioritise_cast = PC, queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in( - Input, PC(Msg, GS2State), Queue) }; + GS2State = #gs2_state { prioritise_cast = PC }) -> + in(Input, PC(Msg, GS2State), GS2State); in({'$gen_call', From, Msg} = Input, - GS2State = #gs2_state { prioritise_call = PC, queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in( - Input, PC(Msg, From, GS2State), Queue) }; -in({'EXIT', Parent, _Reason} = Input, - GS2State = #gs2_state { parent = Parent, queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in(Input, infinity, Queue) }; -in({system, _From, _Req} = Input, GS2State = #gs2_state { queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in(Input, infinity, Queue) }; -in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) -> - GS2State #gs2_state { queue = priority_queue:in( - Input, PI(Input, GS2State), Queue) }. + GS2State = #gs2_state { prioritise_call = PC }) -> + in(Input, PC(Msg, From, GS2State), GS2State); +in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) -> + in(Input, infinity, GS2State); +in({system, _From, _Req} = Input, GS2State) -> + in(Input, infinity, GS2State); +in(Input, GS2State = #gs2_state { prioritise_info = PI }) -> + in(Input, PI(Input, GS2State), GS2State). + +in(Input, Priority, GS2State = #gs2_state { queue = Queue }) -> + GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }. process_msg({system, From, Req}, GS2State = #gs2_state { parent = Parent, debug = Debug }) -> -- cgit v1.2.1 From ce55070fd5ac38b74e6bbdd72efe08006eff04b5 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 28 Jul 2011 16:30:48 +0100 Subject: abstract out mnesia:start and mnesia:stop --- src/rabbit_mnesia.erl | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index b52ad3f9..d398cd87 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -151,13 +151,13 @@ cluster(ClusterNodes, Force) -> end, %% Join the cluster - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + start_mnesia(), try ok = init_db(ClusterNodes, Force, fun maybe_upgrade_local_or_record_desired/0), ok = create_cluster_nodes_config(ClusterNodes) after - mnesia:stop() + stop_mnesia() end, ok. @@ -522,9 +522,7 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) -> %% We've taken down mnesia, so ram nodes will need %% to re-sync case is_disc_node() of - false -> rabbit_misc:ensure_ok(mnesia:start(), - cannot_start_mnesia), - ensure_mnesia_running(), + false -> start_mnesia(), mnesia:change_config(extra_db_nodes, ProperClusterNodes), wait_for_replicated_tables(); @@ -574,7 +572,7 @@ ensure_version_ok({error, _}) -> ok = rabbit_version:record_desired(). 
create_schema(Type) -> - mnesia:stop(), + stop_mnesia(), case Type of disc -> rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema); @@ -582,7 +580,7 @@ create_schema(Type) -> rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema) end, - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + start_mnesia(), ok = create_tables(Type), ensure_schema_integrity(), ok = rabbit_version:record_desired(). @@ -593,7 +591,7 @@ should_be_disc_node(ClusterNodes) -> ClusterNodes == [] orelse lists:member(node(), ClusterNodes). move_db() -> - mnesia:stop(), + stop_mnesia(), MnesiaDir = filename:dirname(dir() ++ "/"), {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(), BackupDir = lists:flatten( @@ -611,7 +609,7 @@ move_db() -> MnesiaDir, BackupDir, Reason}}) end, ensure_mnesia_dir(), - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + start_mnesia(), ok. copy_db(Destination) -> @@ -702,14 +700,14 @@ reset(Force) -> true -> ok; false -> ensure_mnesia_dir(), - rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + start_mnesia(), {Nodes, RunningNodes} = try ok = init(), {all_clustered_nodes() -- [Node], running_clustered_nodes() -- [Node]} after - mnesia:stop() + stop_mnesia() end, leave_cluster(Nodes, RunningNodes), rabbit_misc:ensure_ok(mnesia:delete_schema([Node]), @@ -744,3 +742,10 @@ leave_cluster(Nodes, RunningNodes) -> Nodes, RunningNodes}}) end. +start_mnesia() -> + rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia), + ensure_mnesia_running(). + +stop_mnesia() -> + stopped = mnesia:stop(), + ensure_mnesia_not_running(). -- cgit v1.2.1 From ead345cec20226e8d3a2b9681ff059fe7ca963d5 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 28 Jul 2011 17:32:49 +0100 Subject: silence unzip --- packaging/windows-exe/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/windows-exe/Makefile b/packaging/windows-exe/Makefile index 59803f9c..289621a0 100644 --- a/packaging/windows-exe/Makefile +++ b/packaging/windows-exe/Makefile @@ -10,7 +10,7 @@ rabbitmq-$(VERSION).nsi: rabbitmq_nsi.in $< > $@ rabbitmq_server-$(VERSION): - unzip $(ZIP) + unzip -q $(ZIP) clean: rm -rf rabbitmq-*.nsi rabbitmq_server-* rabbitmq-server-*.exe -- cgit v1.2.1 From ab6b089c195662804fd9dce6f78c4dbc3c25ee91 Mon Sep 17 00:00:00 2001 From: Matthias Radestock Date: Thu, 28 Jul 2011 18:08:36 +0100 Subject: employ more sophisticated message queue formatting ...in the channel, queue and msg_store --- src/priority_queue.erl | 3 +++ src/rabbit_amqqueue_process.erl | 17 ++--------------- src/rabbit_channel.erl | 4 +++- src/rabbit_misc.erl | 23 +++++++++++++++++++++++ src/rabbit_msg_store.erl | 5 ++++- 5 files changed, 35 insertions(+), 17 deletions(-) diff --git a/src/priority_queue.erl b/src/priority_queue.erl index 34787903..4fc8b469 100644 --- a/src/priority_queue.erl +++ b/src/priority_queue.erl @@ -47,6 +47,9 @@ -ifdef(use_specs). +-export_type([q/0]). + +-type(q() :: pqueue()). -type(priority() :: integer() | 'infinity'). -type(squeue() :: {queue, [any()], [any()]}). -type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 4492bbd8..c6019413 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -31,12 +31,10 @@ -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, - prioritise_cast/2, prioritise_info/2]). 
+ prioritise_cast/2, prioritise_info/2, format_message_queue/2]). -export([init_with_backing_queue_state/7]). --export([format_message_queue/2]). - %% Queue's state -record(q, {q, exclusive_consumer, @@ -1165,15 +1163,4 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. -format_message_queue(_Opt, Mailbox) -> - Len = priority_queue:len(Mailbox), - {Len, - case Len > 100 of - false -> priority_queue:to_list(Mailbox); - true -> {summary, - orddict:to_list( - lists:foldl( - fun ({P, _V}, Counts) -> - orddict:update_counter(P, 1, Counts) - end, orddict:new(), priority_queue:to_list(Mailbox)))} - end}. +format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index f398fcc5..13fb7ce1 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -27,7 +27,7 @@ -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, - prioritise_cast/2]). + prioritise_cast/2, format_message_queue/2]). -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, limiter_pid, start_limiter_fun, tx_status, next_tag, @@ -344,6 +344,8 @@ terminate(Reason, State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. +format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). + %%--------------------------------------------------------------------------- reply(Reply, NewState) -> reply(Reply, [], NewState). diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index b6b97f6d..3bbfb1d7 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -57,6 +57,7 @@ -export([ntoa/1, ntoab/1]). -export([is_process_alive/1]). -export([pget/2, pget/3, pget_or_die/2]). +-export([format_message_queue/2]). %%---------------------------------------------------------------------------- @@ -205,6 +206,7 @@ -spec(pget/2 :: (term(), [term()]) -> term()). -spec(pget/3 :: (term(), [term()], term()) -> term()). -spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()). +-spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()). -endif. @@ -919,3 +921,24 @@ pget_or_die(K, P) -> undefined -> exit({error, key_missing, K}); V -> V end. + +format_message_queue(_Opt, MQ) -> + Len = priority_queue:len(MQ), + {Len, + case Len > 100 of + false -> priority_queue:to_list(MQ); + true -> {summary, + orddict:to_list( + lists:foldl( + fun ({P, V}, Counts) -> + orddict:update_counter( + {P, format_message_queue_entry(V)}, 1, Counts) + end, orddict:new(), priority_queue:to_list(MQ)))} + end}. + +format_message_queue_entry(V) when is_atom(V) -> + V; +format_message_queue_entry(V) when is_tuple(V) -> + list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]); +format_message_queue_entry(_V) -> + '_'. diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 3f4162cd..27de1f77 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -29,7 +29,8 @@ -export([transform_dir/3, force_recovery/2]). %% upgrade -export([init/1, handle_call/3, handle_cast/2, handle_info/2, - terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]). + terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2, + format_message_queue/2]). 
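For mailboxes longer than 100 entries the shared formatter above replaces the full listing with per-{priority, shape} counts, where non-atom fields of each message are blanked to '_'. A sketch of the kind of term it produces; the message and counts are illustrative:

    %% e.g. 10000 queued casts of the form
    %%   {'$gen_cast', {set_maximum_since_use, Age}}
    %% all sitting at the same priority P collapse to
    %%   {10000, {summary, [{{P, {'$gen_cast', {set_maximum_since_use, '_'}}},
    %%                      10000}]}}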
%%---------------------------------------------------------------------------- @@ -836,6 +837,8 @@ terminate(_Reason, State = #msstate { index_state = IndexState, code_change(_OldVsn, State, _Extra) -> {ok, State}. +format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ). + %%---------------------------------------------------------------------------- %% general helper functions %%---------------------------------------------------------------------------- -- cgit v1.2.1 From c604f7a99ffdb6ed4e9e5e28903c588c760a328e Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 28 Jul 2011 18:23:06 +0100 Subject: Make sure every shortcut has "RabbitMQ" in its name, and be a bit more consistent. --- packaging/windows-exe/rabbitmq_nsi.in | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in index 1ed4064e..27e4e1dc 100644 --- a/packaging/windows-exe/rabbitmq_nsi.in +++ b/packaging/windows-exe/rabbitmq_nsi.in @@ -113,17 +113,17 @@ Section "Start Menu" RabbitStartMenu CreateDirectory "$APPDATA\RabbitMQ\db" CreateDirectory "$SMPROGRAMS\RabbitMQ Server" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "$INSTDIR\uninstall.exe" 0 - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Plugins Directory.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\plugins" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Log Directory.lnk" "$APPDATA\RabbitMQ\log" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Database Directory.lnk" "$APPDATA\RabbitMQ\db" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\(Re)Install Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "install" "$INSTDIR\rabbitmq.ico" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Remove Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "remove" "$INSTDIR\rabbitmq.ico" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Start Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "start" "$INSTDIR\rabbitmq.ico" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Stop Service.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "stop" "$INSTDIR\rabbitmq.ico" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Uninstall RabbitMQ.lnk" "$INSTDIR\uninstall.exe" "" "$INSTDIR\uninstall.exe" 0 + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Plugins.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\plugins" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Logs.lnk" "$APPDATA\RabbitMQ\log" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Database Directory.lnk" "$APPDATA\RabbitMQ\db" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - (re)install.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "install" "$INSTDIR\rabbitmq.ico" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - remove.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "remove" "$INSTDIR\rabbitmq.ico" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - start.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "start" "$INSTDIR\rabbitmq.ico" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - stop.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "stop" "$INSTDIR\rabbitmq.ico" SetOutPath "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin" - CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Command Prompt (sbin dir).lnk" "$WINDIR\system32\cmd.exe" "" 
"$WINDIR\system32\cmd.exe" + CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Command Prompt (sbin dir).lnk" "$WINDIR\system32\cmd.exe" "" "$WINDIR\system32\cmd.exe" SetOutPath $INSTDIR SectionEnd -- cgit v1.2.1 From 3df28de3cf1eeb1abbe89d831b05e8480e1d5d44 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 28 Jul 2011 18:30:30 +0100 Subject: silence zips Gzips don't normally print anything, so they're fine. --- Makefile | 2 +- packaging/windows/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 495689fb..ee2700af 100644 --- a/Makefile +++ b/Makefile @@ -238,7 +238,7 @@ srcdist: distclean chmod 0755 $(TARGET_SRC_DIR)/scripts/* (cd dist; tar -zcf $(TARBALL_NAME).tar.gz $(TARBALL_NAME)) - (cd dist; zip -r $(TARBALL_NAME).zip $(TARBALL_NAME)) + (cd dist; zip -q -r $(TARBALL_NAME).zip $(TARBALL_NAME)) rm -rf $(TARGET_SRC_DIR) distclean: clean diff --git a/packaging/windows/Makefile b/packaging/windows/Makefile index dacfa620..6239dcc5 100644 --- a/packaging/windows/Makefile +++ b/packaging/windows/Makefile @@ -24,7 +24,7 @@ dist: elinks -dump -no-references -no-numbering rabbitmq-service.html \ > $(TARGET_DIR)/readme-service.txt todos $(TARGET_DIR)/readme-service.txt - zip -r $(TARGET_ZIP).zip $(TARGET_DIR) + zip -q -r $(TARGET_ZIP).zip $(TARGET_DIR) rm -rf $(TARGET_DIR) rabbitmq-service.html clean: clean_partial -- cgit v1.2.1 From db402d5dc41296a0543b8fec1fb5692bf2916693 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 29 Jul 2011 09:56:45 +0100 Subject: silence tar --- packaging/debs/Debian/Makefile | 2 +- packaging/generic-unix/Makefile | 2 +- packaging/windows/Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile index 31979a8e..38c81134 100644 --- a/packaging/debs/Debian/Makefile +++ b/packaging/debs/Debian/Makefile @@ -19,7 +19,7 @@ all: package: clean cp $(TARBALL_DIR)/$(TARBALL) $(DEBIAN_ORIG_TARBALL) - tar -zxvf $(DEBIAN_ORIG_TARBALL) + tar -zxf $(DEBIAN_ORIG_TARBALL) cp -r debian $(UNPACKED_DIR) cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/ # Debian and descendants differ from most other distros in that diff --git a/packaging/generic-unix/Makefile b/packaging/generic-unix/Makefile index c4e01f4a..b5c342aa 100644 --- a/packaging/generic-unix/Makefile +++ b/packaging/generic-unix/Makefile @@ -4,7 +4,7 @@ TARGET_DIR=rabbitmq_server-$(VERSION) TARGET_TARBALL=rabbitmq-server-generic-unix-$(VERSION) dist: - tar -zxvf ../../dist/$(SOURCE_DIR).tar.gz + tar -zxf ../../dist/$(SOURCE_DIR).tar.gz $(MAKE) -C $(SOURCE_DIR) \ TARGET_DIR=`pwd`/$(TARGET_DIR) \ diff --git a/packaging/windows/Makefile b/packaging/windows/Makefile index 6239dcc5..a0be8d89 100644 --- a/packaging/windows/Makefile +++ b/packaging/windows/Makefile @@ -4,7 +4,7 @@ TARGET_DIR=rabbitmq_server-$(VERSION) TARGET_ZIP=rabbitmq-server-windows-$(VERSION) dist: - tar -zxvf ../../dist/$(SOURCE_DIR).tar.gz + tar -zxf ../../dist/$(SOURCE_DIR).tar.gz $(MAKE) -C $(SOURCE_DIR) mkdir $(SOURCE_DIR)/sbin -- cgit v1.2.1 From 6bb7d212f9f352832167c2c1995f67a1e2143bdb Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Fri, 29 Jul 2011 10:26:31 +0100 Subject: silence nsis --- packaging/windows-exe/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/windows-exe/Makefile b/packaging/windows-exe/Makefile index 289621a0..ab50e30b 100644 --- a/packaging/windows-exe/Makefile +++ b/packaging/windows-exe/Makefile @@ -2,7 +2,7 @@ VERSION=0.0.0 
ZIP=../windows/rabbitmq-server-windows-$(VERSION) dist: rabbitmq-$(VERSION).nsi rabbitmq_server-$(VERSION) - makensis rabbitmq-$(VERSION).nsi + makensis -V2 rabbitmq-$(VERSION).nsi rabbitmq-$(VERSION).nsi: rabbitmq_nsi.in sed \ -- cgit v1.2.1 From a89a2b3ac7d10f87d5d42191018db2a95bf77962 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Fri, 29 Jul 2011 13:24:50 +0100 Subject: That comment feels very redundant now, this is no longer hard. --- src/rabbit_mnesia.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl index 1081f0cb..0ca0ec2f 100644 --- a/src/rabbit_mnesia.erl +++ b/src/rabbit_mnesia.erl @@ -194,8 +194,6 @@ nodes_of_type(Type) -> %% This function should return the nodes of a certain type (ram, %% disc or disc_only) in the current cluster. The type of nodes %% is determined when the cluster is initially configured. - %% Specifically, we check whether the schema, which we know will - %% be written to disk on a disc node, is stored on disk or in RAM. mnesia:table_info(schema, Type). %% The tables aren't supposed to be on disk on a ram node -- cgit v1.2.1 From 5d2fba42902b1a09eb911dc77fd1a0b5c7c8f969 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 11:11:14 +0100 Subject: refactor --- src/rabbit_amqqueue.erl | 15 ++------------- src/rabbit_amqqueue_process.erl | 25 +++++++++++-------------- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 6024db65..977cd241 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -32,8 +32,8 @@ %% internal -export([internal_declare/2, internal_delete/1, run_backing_queue/3, - sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2, - set_maximum_since_use/2, maybe_expire/1, drop_expired/1]). + sync_timeout/1, set_ram_duration_target/2, + set_maximum_since_use/2]). -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). @@ -141,10 +141,8 @@ (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). -spec(sync_timeout/1 :: (pid()) -> 'ok'). --spec(update_ram_duration/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). -spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). --spec(maybe_expire/1 :: (pid()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). -spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()). @@ -484,21 +482,12 @@ run_backing_queue(QPid, Mod, Fun) -> sync_timeout(QPid) -> gen_server2:cast(QPid, sync_timeout). -update_ram_duration(QPid) -> - gen_server2:cast(QPid, update_ram_duration). - set_ram_duration_target(QPid, Duration) -> gen_server2:cast(QPid, {set_ram_duration_target, Duration}). set_maximum_since_use(QPid, Age) -> gen_server2:cast(QPid, {set_maximum_since_use, Age}). -maybe_expire(QPid) -> - gen_server2:cast(QPid, maybe_expire). - -drop_expired(QPid) -> - gen_server2:cast(QPid, drop_expired). 
- on_node_down(Node) -> rabbit_misc:execute_mnesia_tx_with_tail( fun () -> Dels = qlc:e(qlc:q([delete_queue(QueueName) || diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index e787fa84..c7b9eaab 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -275,13 +275,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> - _ = erlang:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#q{rate_timer_ref = undefined}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> - _ = erlang:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#q{expiry_timer_ref = undefined}. %% We wish to expire only when there are no consumers *and* the expiry @@ -787,12 +787,9 @@ prioritise_call(Msg, _From, _State) -> prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; delete_immediately -> 8; {set_ram_duration_target, _Duration} -> 8; {set_maximum_since_use, _Age} -> 8; - maybe_expire -> 8; - drop_expired -> 8; emit_stats -> 7; {ack, _AckTags, _ChPid} -> 7; {reject, _AckTags, _Requeue, _ChPid} -> 7; @@ -1096,15 +1093,6 @@ handle_cast({set_maximum_since_use, Age}, State) -> ok = file_handle_cache:set_maximum_since_use(Age), noreply(State). -handle_info(update_ram_duration, State = #q{backing_queue = BQ, - backing_queue_state = BQS}) -> - {RamDuration, BQS1} = BQ:ram_duration(BQS), - DesiredDuration = - rabbit_memory_monitor:report_ram_duration(self(), RamDuration), - BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), - noreply(State#q{rate_timer_ref = just_measured, - backing_queue_state = BQS2}); - handle_info(maybe_expire, State) -> case is_unused(State) of true -> ?LOGDEBUG("Queue lease expired for ~p~n", [State#q.q]), @@ -1137,6 +1125,15 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) -> {stop, NewState} -> {stop, normal, NewState} end; +handle_info(update_ram_duration, State = #q{backing_queue = BQ, + backing_queue_state = BQS}) -> + {RamDuration, BQS1} = BQ:ram_duration(BQS), + DesiredDuration = + rabbit_memory_monitor:report_ram_duration(self(), RamDuration), + BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), + noreply(State#q{rate_timer_ref = just_measured, + backing_queue_state = BQS2}); + handle_info(timeout, State) -> noreply(backing_queue_timeout(State)); -- cgit v1.2.1 From c6b1df765ff968a0b5e0686fef143351143820ff Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 11:41:23 +0100 Subject: some more refactoring --- src/rabbit_amqqueue_process.erl | 5 ++--- src/rabbit_channel.erl | 12 ++++++------ src/rabbit_event.erl | 2 +- src/rabbit_misc.erl | 8 +++++++- src/rabbit_msg_store.erl | 2 +- src/rabbit_tests.erl | 2 +- 6 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index c7b9eaab..403c39fc 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -275,13 +275,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> - erlang:cancel_timer(TRef), + rabbit_misc:cancel_timer(TRef), State#q{rate_timer_ref = undefined}. 
stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> - erlang:cancel_timer(TRef), + rabbit_misc:cancel_timer(TRef), State#q{expiry_timer_ref = undefined}. %% We wish to expire only when there are no consumers *and* the expiry @@ -790,7 +790,6 @@ prioritise_cast(Msg, _State) -> delete_immediately -> 8; {set_ram_duration_target, _Duration} -> 8; {set_maximum_since_use, _Age} -> 8; - emit_stats -> 7; {ack, _AckTags, _ChPid} -> 7; {reject, _AckTags, _Requeue, _ChPid} -> 7; {notify_sent, _ChPid} -> 7; diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 6fbbc93e..4dd38fd6 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -23,7 +23,7 @@ -export([start_link/10, do/2, do/3, flush/1, shutdown/1]). -export([send_command/2, deliver/4, flushed/2, confirm/2]). -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]). --export([refresh_config_all/0, emit_stats/1, ready_for_close/1]). +-export([refresh_config_all/0, ready_for_close/1]). -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, @@ -91,7 +91,6 @@ -spec(info_all/0 :: () -> [rabbit_types:infos()]). -spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]). -spec(refresh_config_all/0 :: () -> 'ok'). --spec(emit_stats/1 :: (pid()) -> 'ok'). -spec(ready_for_close/1 :: (pid()) -> 'ok'). -endif. @@ -153,9 +152,6 @@ refresh_config_all() -> fun (C) -> gen_server2:call(C, refresh_config) end, list()), ok. -emit_stats(Pid) -> - gen_server2:cast(Pid, emit_stats). - ready_for_close(Pid) -> gen_server2:cast(Pid, ready_for_close). @@ -209,11 +205,15 @@ prioritise_call(Msg, _From, _State) -> prioritise_cast(Msg, _State) -> case Msg of - emit_stats -> 7; {confirm, _MsgSeqNos, _QPid} -> 5; _ -> 0 end. +prioritise_info(Msg, _State) -> + case Msg of + emit_stats -> 7 + end. + handle_call(flush, _From, State) -> reply(ok, State); diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index 887e4a1f..ef098a45 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -113,7 +113,7 @@ stop_stats_timer(State = #state{level = none}) -> stop_stats_timer(State = #state{timer = undefined}) -> State; stop_stats_timer(State = #state{timer = TRef}) -> - _ = erlang:cancel_timer(TRef), + rabbit_misc:cancel_timer(TRef), State#state{timer = undefined}. reset_stats_timer(State) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 3bbfb1d7..337e3d6f 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -51,7 +51,7 @@ -export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0]). +-export([now_ms/0, cancel_timer/1]). -export([lock_file/1]). -export([const_ok/0, const/1]). -export([ntoa/1, ntoab/1]). @@ -832,6 +832,12 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. +cancel_timer(TRef) -> + case erlang:cancel_timer(TRef) of + false -> throw({not_a_valid_timer, TRef}); + _ -> ok + end. 
+ module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 6c5035a0..54424803 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -873,7 +873,7 @@ start_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> - _ = erlang:cancel_timer(TRef), + rabbit_misc:cancel_timer(TRef), State #msstate { sync_timer_ref = undefined }. internal_sync(State = #msstate { current_file_handle = CurHdl, diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl index 2a3ced92..ed4efb47 100644 --- a/src/rabbit_tests.erl +++ b/src/rabbit_tests.erl @@ -1287,7 +1287,7 @@ test_statistics_event_receiver(Pid) -> test_statistics_receive_event(Ch, Matcher) -> rabbit_channel:flush(Ch), - rabbit_channel:emit_stats(Ch), + Ch ! emit_stats, test_statistics_receive_event1(Ch, Matcher). test_statistics_receive_event1(Ch, Matcher) -> -- cgit v1.2.1 From 7083ee430fb23719cb599e4ddc185fa97163c396 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 11:47:23 +0100 Subject: convert amqqueue_process's sync timer --- src/rabbit_amqqueue.erl | 7 +------ src/rabbit_amqqueue_process.erl | 23 +++++++++++------------ 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl index 977cd241..eae6312b 100644 --- a/src/rabbit_amqqueue.erl +++ b/src/rabbit_amqqueue.erl @@ -32,8 +32,7 @@ %% internal -export([internal_declare/2, internal_delete/1, run_backing_queue/3, - sync_timeout/1, set_ram_duration_target/2, - set_maximum_since_use/2]). + set_ram_duration_target/2, set_maximum_since_use/2]). -include("rabbit.hrl"). -include_lib("stdlib/include/qlc.hrl"). @@ -140,7 +139,6 @@ -spec(run_backing_queue/3 :: (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok'). --spec(sync_timeout/1 :: (pid()) -> 'ok'). -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok'). -spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok'). -spec(on_node_down/1 :: (node()) -> 'ok'). @@ -479,9 +477,6 @@ internal_delete(QueueName) -> run_backing_queue(QPid, Mod, Fun) -> gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}). -sync_timeout(QPid) -> - gen_server2:cast(QPid, sync_timeout). - set_ram_duration_target(QPid, Duration) -> gen_server2:cast(QPid, {set_ram_duration_target, Duration}). diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 403c39fc..eee899b3 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -249,8 +249,7 @@ backing_queue_module(#amqqueue{arguments = Args}) -> end. ensure_sync_timer(State = #q{sync_timer_ref = undefined}) -> - {ok, TRef} = timer:apply_after( - ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync_timeout), State#q{sync_timer_ref = TRef}; ensure_sync_timer(State) -> State. @@ -258,7 +257,7 @@ ensure_sync_timer(State) -> stop_sync_timer(State = #q{sync_timer_ref = undefined}) -> State; stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> - {ok, cancel} = timer:cancel(TRef), + rabbit_misc:cancel_timer(TRef), State#q{sync_timer_ref = undefined}. 
ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> @@ -795,7 +794,6 @@ prioritise_cast(Msg, _State) -> {notify_sent, _ChPid} -> 7; {unblock, _ChPid} -> 7; {run_backing_queue, _Mod, _Fun} -> 6; - sync_timeout -> 6; _ -> 0 end. @@ -803,11 +801,12 @@ prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, #q{q = #amqqueue{exclusive_owner = DownPid}}) -> 8; prioritise_info(Msg, _State) -> case Msg of - update_ram_duration -> 8; - maybe_expire -> 8; - drop_expired -> 8; - emit_stats -> 7; - _ -> 0 + update_ram_duration -> 8; + maybe_expire -> 8; + drop_expired -> 8; + emit_stats -> 7; + sync_timeout -> 6; + _ -> 0 end. handle_call({init, Recover}, From, @@ -1013,9 +1012,6 @@ handle_call({requeue, AckTags, ChPid}, From, State) -> handle_cast({run_backing_queue, Mod, Fun}, State) -> noreply(run_backing_queue(Mod, Fun, State)); -handle_cast(sync_timeout, State) -> - noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined})); - handle_cast({deliver, Delivery}, State) -> %% Asynchronous, non-"mandatory", non-"immediate" deliver mode. noreply(deliver_or_enqueue(Delivery, State)); @@ -1133,6 +1129,9 @@ handle_info(update_ram_duration, State = #q{backing_queue = BQ, noreply(State#q{rate_timer_ref = just_measured, backing_queue_state = BQS2}); +handle_info(sync_timeout, State) -> + noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined})); + handle_info(timeout, State) -> noreply(backing_queue_timeout(State)); -- cgit v1.2.1 From d8bca4b66ff9e447a662d340d0f1827da9b8386a Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 11:58:03 +0100 Subject: convert gm's flush timer --- src/gm.erl | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 8b7dc70c..00cf554b 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -376,11 +376,11 @@ confirmed_broadcast/2, group_members/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3, prioritise_cast/2, prioritise_info/2]). + code_change/3, prioritise_info/2]). -export([behaviour_info/1]). --export([table_definitions/0, flush/1]). +-export([table_definitions/0]). -define(GROUP_TABLE, gm_group). -define(HIBERNATE_AFTER_MIN, 1000). @@ -511,9 +511,6 @@ confirmed_broadcast(Server, Msg) -> group_members(Server) -> gen_server2:call(Server, group_members, infinity). -flush(Server) -> - gen_server2:cast(Server, flush). - init([GroupName, Module, Args]) -> {MegaSecs, Secs, MicroSecs} = now(), @@ -629,12 +626,12 @@ handle_cast(join, State = #state { self = Self, {Module:joined(Args, all_known_members(View)), State1}); handle_cast(leave, State) -> - {stop, normal, State}; + {stop, normal, State}. -handle_cast(flush, State) -> - noreply( - flush_broadcast_buffer(State #state { broadcast_timer = undefined })). +handle_info(flush, State) -> + noreply( + flush_broadcast_buffer(State #state { broadcast_timer = undefined })); handle_info({'DOWN', MRef, process, _Pid, _Reason}, State = #state { self = Self, @@ -684,9 +681,7 @@ terminate(Reason, State = #state { module = Module, code_change(_OldVsn, State, _Extra) -> {ok, State}. -prioritise_cast(flush, _State) -> 1; -prioritise_cast(_ , _State) -> 0. - +prioritise_info(flush, _State) -> 1; prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1; prioritise_info(_ , _State) -> 0. 
@@ -808,10 +803,10 @@ ensure_broadcast_timer(State = #state { broadcast_buffer = [], State; ensure_broadcast_timer(State = #state { broadcast_buffer = [], broadcast_timer = TRef }) -> - timer:cancel(TRef), + rabbit_misc:cancel_timer(TRef), State #state { broadcast_timer = undefined }; ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> - {ok, TRef} = timer:apply_after(?BROADCAST_TIMER, ?MODULE, flush, [self()]), + TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush), State #state { broadcast_timer = TRef }; ensure_broadcast_timer(State) -> State. -- cgit v1.2.1 From 3c3a2ae1f4a6fa06f08aec052876326ac7bfb15e Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 12:03:48 +0100 Subject: convert mirror_queue_slave's sync timer --- src/rabbit_mirror_queue_slave.erl | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index b38a8967..499407fd 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -37,7 +37,7 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3, handle_pre_hibernate/1, prioritise_call/3, - prioritise_cast/2]). + prioritise_cast/2, prioritise_info/2]). -export([joined/2, members_changed/3, handle_msg/3]). @@ -197,11 +197,11 @@ handle_cast(update_ram_duration, rabbit_memory_monitor:report_ram_duration(self(), RamDuration), BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), noreply(State #state { rate_timer_ref = just_measured, - backing_queue_state = BQS2 }); + backing_queue_state = BQS2 }). -handle_cast(sync_timeout, State) -> +handle_info(sync_timeout, State) -> noreply(backing_queue_timeout( - State #state { sync_timer_ref = undefined })). + State #state { sync_timer_ref = undefined })); handle_info(timeout, State) -> noreply(backing_queue_timeout(State)); @@ -270,12 +270,17 @@ prioritise_cast(Msg, _State) -> {set_ram_duration_target, _Duration} -> 8; {set_maximum_since_use, _Age} -> 8; {run_backing_queue, _Mod, _Fun} -> 6; - sync_timeout -> 6; {gm, _Msg} -> 5; {post_commit, _Txn, _AckTags} -> 4; _ -> 0 end. +prioritise_info(Msg, _State) -> + case Msg of + sync_timeout -> 6; + _ -> 0 + end. + %% --------------------------------------------------------------------------- %% GM %% --------------------------------------------------------------------------- @@ -516,8 +521,7 @@ backing_queue_timeout(State = #state { backing_queue = BQ }) -> run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State). ensure_sync_timer(State = #state { sync_timer_ref = undefined }) -> - {ok, TRef} = timer:apply_after( - ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]), + TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync_timeout), State #state { sync_timer_ref = TRef }; ensure_sync_timer(State) -> State. @@ -525,7 +529,7 @@ ensure_sync_timer(State) -> stop_sync_timer(State = #state { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #state { sync_timer_ref = TRef }) -> - {ok, cancel} = timer:cancel(TRef), + rabbit_misc:cancel_timer(TRef), State #state { sync_timer_ref = undefined }. 
ensure_rate_timer(State = #state { rate_timer_ref = undefined }) -> -- cgit v1.2.1 From 18e2e38f65f844cc893a85452a0b8034547f85ff Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 12:18:58 +0100 Subject: convert another timer --- src/rabbit_mirror_queue_slave.erl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 499407fd..72db8c16 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -187,9 +187,9 @@ handle_cast({set_ram_duration_target, Duration}, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> BQS1 = BQ:set_ram_duration_target(Duration, BQS), - noreply(State #state { backing_queue_state = BQS1 }); + noreply(State #state { backing_queue_state = BQS1 }). -handle_cast(update_ram_duration, +handle_info(update_ram_duration, State = #state { backing_queue = BQ, backing_queue_state = BQS }) -> {RamDuration, BQS1} = BQ:ram_duration(BQS), @@ -197,7 +197,7 @@ handle_cast(update_ram_duration, rabbit_memory_monitor:report_ram_duration(self(), RamDuration), BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), noreply(State #state { rate_timer_ref = just_measured, - backing_queue_state = BQS2 }). + backing_queue_state = BQS2 }); handle_info(sync_timeout, State) -> noreply(backing_queue_timeout( @@ -266,7 +266,6 @@ prioritise_call(Msg, _From, _State) -> prioritise_cast(Msg, _State) -> case Msg of - update_ram_duration -> 8; {set_ram_duration_target, _Duration} -> 8; {set_maximum_since_use, _Age} -> 8; {run_backing_queue, _Mod, _Fun} -> 6; @@ -277,6 +276,7 @@ prioritise_cast(Msg, _State) -> prioritise_info(Msg, _State) -> case Msg of + update_ram_duration -> 8; sync_timeout -> 6; _ -> 0 end. @@ -533,10 +533,8 @@ stop_sync_timer(State = #state { sync_timer_ref = TRef }) -> State #state { sync_timer_ref = undefined }. 
ensure_rate_timer(State = #state { rate_timer_ref = undefined }) -> - {ok, TRef} = timer:apply_after( - ?RAM_DURATION_UPDATE_INTERVAL, - rabbit_amqqueue, update_ram_duration, - [self()]), + TRef = erlang:send_after(?RAM_DURATION_UPDATE_INTERVAL, + self(), update_ram_duration), State #state { rate_timer_ref = TRef }; ensure_rate_timer(State = #state { rate_timer_ref = just_measured }) -> State #state { rate_timer_ref = undefined }; -- cgit v1.2.1 From 825f14e3ec42b9590410a6f9469c5ddb9564f839 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 12:27:34 +0100 Subject: just ignore the result to erlang:cancel_timer --- src/gm.erl | 2 +- src/rabbit_amqqueue_process.erl | 6 +++--- src/rabbit_event.erl | 2 +- src/rabbit_mirror_queue_slave.erl | 4 ++-- src/rabbit_misc.erl | 8 +------- src/rabbit_msg_store.erl | 2 +- 6 files changed, 9 insertions(+), 15 deletions(-) diff --git a/src/gm.erl b/src/gm.erl index 00cf554b..8b4d2776 100644 --- a/src/gm.erl +++ b/src/gm.erl @@ -803,7 +803,7 @@ ensure_broadcast_timer(State = #state { broadcast_buffer = [], State; ensure_broadcast_timer(State = #state { broadcast_buffer = [], broadcast_timer = TRef }) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State #state { broadcast_timer = undefined }; ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) -> TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush), diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index eee899b3..f644954f 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -257,7 +257,7 @@ ensure_sync_timer(State) -> stop_sync_timer(State = #q{sync_timer_ref = undefined}) -> State; stop_sync_timer(State = #q{sync_timer_ref = TRef}) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#q{sync_timer_ref = undefined}. ensure_rate_timer(State = #q{rate_timer_ref = undefined}) -> @@ -274,13 +274,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) -> stop_rate_timer(State = #q{rate_timer_ref = just_measured}) -> State#q{rate_timer_ref = undefined}; stop_rate_timer(State = #q{rate_timer_ref = TRef}) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#q{rate_timer_ref = undefined}. stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) -> State; stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#q{expiry_timer_ref = undefined}. %% We wish to expire only when there are no consumers *and* the expiry diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl index ef098a45..bb765566 100644 --- a/src/rabbit_event.erl +++ b/src/rabbit_event.erl @@ -113,7 +113,7 @@ stop_stats_timer(State = #state{level = none}) -> stop_stats_timer(State = #state{timer = undefined}) -> State; stop_stats_timer(State = #state{timer = TRef}) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State#state{timer = undefined}. reset_stats_timer(State) -> diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl index 72db8c16..12b6f3ca 100644 --- a/src/rabbit_mirror_queue_slave.erl +++ b/src/rabbit_mirror_queue_slave.erl @@ -529,7 +529,7 @@ ensure_sync_timer(State) -> stop_sync_timer(State = #state { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #state { sync_timer_ref = TRef }) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State #state { sync_timer_ref = undefined }. 
ensure_rate_timer(State = #state { rate_timer_ref = undefined }) -> @@ -546,7 +546,7 @@ stop_rate_timer(State = #state { rate_timer_ref = undefined }) -> stop_rate_timer(State = #state { rate_timer_ref = just_measured }) -> State #state { rate_timer_ref = undefined }; stop_rate_timer(State = #state { rate_timer_ref = TRef }) -> - {ok, cancel} = timer:cancel(TRef), + erlang:cancel_timer(TRef), State #state { rate_timer_ref = undefined }. ensure_monitoring(ChPid, State = #state { known_senders = KS }) -> diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 337e3d6f..3bbfb1d7 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -51,7 +51,7 @@ -export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3]). -export([get_options/2]). -export([all_module_attributes/1, build_acyclic_graph/3]). --export([now_ms/0, cancel_timer/1]). +-export([now_ms/0]). -export([lock_file/1]). -export([const_ok/0, const/1]). -export([ntoa/1, ntoab/1]). @@ -832,12 +832,6 @@ get_flag(_, []) -> now_ms() -> timer:now_diff(now(), {0,0,0}) div 1000. -cancel_timer(TRef) -> - case erlang:cancel_timer(TRef) of - false -> throw({not_a_valid_timer, TRef}); - _ -> ok - end. - module_attributes(Module) -> case catch Module:module_info(attributes) of {'EXIT', {undef, [{Module, module_info, _} | _]}} -> diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl index 54424803..e90e1281 100644 --- a/src/rabbit_msg_store.erl +++ b/src/rabbit_msg_store.erl @@ -873,7 +873,7 @@ start_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) -> State; stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) -> - rabbit_misc:cancel_timer(TRef), + erlang:cancel_timer(TRef), State #msstate { sync_timer_ref = undefined }. internal_sync(State = #msstate { current_file_handle = CurHdl, -- cgit v1.2.1 From 3e5037b71c4bd5d4947ea0ed3807a51f87255388 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 12:46:53 +0100 Subject: more consistent naming It's called *internal*_emit_stats in the channel. --- src/rabbit_amqqueue_process.erl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index f644954f..fcca3087 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -182,8 +182,9 @@ declare(Recover, From, State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), - rabbit_event:if_enabled(StatsTimer, - fun() -> emit_stats(State1) end), + rabbit_event:if_enabled( + StatsTimer, + fun() -> internal_emit_stats(State1) end), noreply(State1); Q1 -> {stop, normal, {existing, Q1}, State} end. @@ -754,10 +755,10 @@ consumers(#q{active_consumers = ActiveConsumers, [{ChPid, ConsumerTag, AckRequired} | Acc] end, [], queue:join(ActiveConsumers, BlockedConsumers)). -emit_stats(State) -> - emit_stats(State, []). +internal_emit_stats(State) -> + internal_emit_stats(State, []). -emit_stats(State, Extra) -> +internal_emit_stats(State, Extra) -> rabbit_event:notify(queue_stats, Extra ++ infos(?STATISTICS_KEYS, State)). emit_consumer_created(ChPid, ConsumerTag, Exclusive, AckRequired) -> @@ -1100,7 +1101,7 @@ handle_info(drop_expired, State) -> handle_info(emit_stats, State = #q{stats_timer = StatsTimer}) -> %% Do not invoke noreply as it would see no timer and create a new one. 
- emit_stats(State), + internal_emit_stats(State), State1 = State#q{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}, assert_invariant(State1), {noreply, State1, hibernate}; @@ -1152,10 +1153,9 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, rabbit_memory_monitor:report_ram_duration(self(), RamDuration), BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1), BQS3 = BQ:handle_pre_hibernate(BQS2), - rabbit_event:if_enabled(StatsTimer, - fun () -> - emit_stats(State, [{idle_since, now()}]) - end), + rabbit_event:if_enabled( + StatsTimer, + fun () -> internal_emit_stats(State, [{idle_since, now()}]) end), State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. -- cgit v1.2.1 From 4c735cdfcd26e654a6506f1823d354b9f840dffb Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 16:06:44 +0100 Subject: log rabbitmqctl invocations on the remote node --- src/rabbit_control.erl | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 6eb1aaba..8b20ad51 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -17,7 +17,7 @@ -module(rabbit_control). -include("rabbit.hrl"). --export([start/0, stop/0, action/5, diagnostics/1]). +-export([start/0, stop/0, action/5, diagnostics/1, log_anytime/2]). -define(RPC_TIMEOUT, infinity). -define(WAIT_FOR_VM_ATTEMPTS, 5). @@ -51,6 +51,7 @@ -> 'ok'). -spec(diagnostics/1 :: (node()) -> [{string(), [any()]}]). -spec(usage/0 :: () -> no_return()). +-spec(log_anytime/2 :: (string(), [term()]) -> ok). -endif. @@ -73,6 +74,9 @@ start() -> Command = list_to_atom(Command0), Quiet = proplists:get_bool(?QUIET_OPT, Opts1), Node = proplists:get_value(?NODE_OPT, Opts1), + rpc_call(Node, rabbit_control, log_anytime, + ["~p executing 'rabbitmqctl ~p'~n", + [node(), init:get_plain_arguments()]]), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; false -> fun (Format, Args1) -> @@ -109,6 +113,14 @@ start() -> fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args). +%% Log an info item on a remote node regardless of whether rabbit is +%% running there or not: first change the group leader to that of the +%% remote node, then use the standard error logger, because rabbit's +%% might not be running. +log_anytime(Format, Args) -> + group_leader(whereis(user), self()), + error_logger:info_msg(Format, Args). + print_report(Node, {Descr, Module, InfoFun, KeysFun}) -> io:format("~s:~n", [Descr]), print_report0(Node, {Module, InfoFun, KeysFun}, []). 
-- cgit v1.2.1 From 72c0a4b10d13e02c27235ecdc94d887d13d187dd Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 17:27:01 +0100 Subject: be discrete about what you log --- src/rabbit_control.erl | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 8b20ad51..ad0b6d0c 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -75,8 +75,8 @@ start() -> Quiet = proplists:get_bool(?QUIET_OPT, Opts1), Node = proplists:get_value(?NODE_OPT, Opts1), rpc_call(Node, rabbit_control, log_anytime, - ["~p executing 'rabbitmqctl ~p'~n", - [node(), init:get_plain_arguments()]]), + ["~p executing~nrabbitmqctl ~p~n", + [node(), mask_args([Command0 | Args])]]), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; false -> fun (Format, Args1) -> @@ -486,3 +486,13 @@ quit(Status) -> {unix, _} -> halt(Status); {win32, _} -> init:stop(Status) end. + +%% Mask passwords and other sensitive info before logging. +mask_args([]) -> + []; +mask_args(["add_user", Name, Password | Args]) -> + ["add_user", Name, "****" | mask_args(Args)]; +mask_args(["change_password", Name, Password | Args]) -> + ["change_password", Name, "****" | mask_args(Args)]; +mask_args([Arg | Args]) -> + [Arg | mask_args(Args)]. -- cgit v1.2.1 From 227bdc662000b68b4a63a4086f918f73b5ddfe9f Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Tue, 2 Aug 2011 17:48:28 +0100 Subject: refactoring Rename internal_emit_stats to emit_stats. Add the missing export to channel. Prefer one big case statement over a function with two heads. --- src/rabbit_amqqueue_process.erl | 17 ++++++++--------- src/rabbit_channel.erl | 22 ++++++++++------------ src/rabbit_reader.erl | 6 +++--- 3 files changed, 21 insertions(+), 24 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index fcca3087..0b4b62f5 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -184,7 +184,7 @@ declare(Recover, From, infos(?CREATION_EVENT_KEYS, State1)), rabbit_event:if_enabled( StatsTimer, - fun() -> internal_emit_stats(State1) end), + fun() -> emit_stats(State1) end), noreply(State1); Q1 -> {stop, normal, {existing, Q1}, State} end. @@ -755,10 +755,10 @@ consumers(#q{active_consumers = ActiveConsumers, [{ChPid, ConsumerTag, AckRequired} | Acc] end, [], queue:join(ActiveConsumers, BlockedConsumers)). -internal_emit_stats(State) -> - internal_emit_stats(State, []). +emit_stats(State) -> + emit_stats(State, []). -internal_emit_stats(State, Extra) -> +emit_stats(State, Extra) -> rabbit_event:notify(queue_stats, Extra ++ infos(?STATISTICS_KEYS, State)). emit_consumer_created(ChPid, ConsumerTag, Exclusive, AckRequired) -> @@ -798,10 +798,9 @@ prioritise_cast(Msg, _State) -> _ -> 0 end. -prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, - #q{q = #amqqueue{exclusive_owner = DownPid}}) -> 8; -prioritise_info(Msg, _State) -> +prioritise_info(Msg, #q{q = #amqqueue{exclusive_owner = DownPid}}) -> case Msg of + {'DOWN', _, process, DownPid, _} -> 8; update_ram_duration -> 8; maybe_expire -> 8; drop_expired -> 8; @@ -1101,7 +1100,7 @@ handle_info(drop_expired, State) -> handle_info(emit_stats, State = #q{stats_timer = StatsTimer}) -> %% Do not invoke noreply as it would see no timer and create a new one. 
- internal_emit_stats(State), + emit_stats(State), State1 = State#q{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}, assert_invariant(State1), {noreply, State1, hibernate}; @@ -1155,7 +1154,7 @@ handle_pre_hibernate(State = #q{backing_queue = BQ, BQS3 = BQ:handle_pre_hibernate(BQS2), rabbit_event:if_enabled( StatsTimer, - fun () -> internal_emit_stats(State, [{idle_since, now()}]) end), + fun () -> emit_stats(State, [{idle_since, now()}]) end), State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer), backing_queue_state = BQS3}, {hibernate, stop_rate_timer(State1)}. diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 4dd38fd6..72d6d33a 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -27,7 +27,7 @@ -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2, handle_pre_hibernate/1, prioritise_call/3, - prioritise_cast/2, format_message_queue/2]). + prioritise_cast/2, prioritise_info/2, format_message_queue/2]). -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid, limiter_pid, start_limiter_fun, tx_status, next_tag, @@ -192,7 +192,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost, trace_state = rabbit_trace:init(VHost)}, rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)), rabbit_event:if_enabled(StatsTimer, - fun() -> internal_emit_stats(State) end), + fun() -> emit_stats(State) end), {ok, State, hibernate, {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}. @@ -211,7 +211,8 @@ prioritise_cast(Msg, _State) -> prioritise_info(Msg, _State) -> case Msg of - emit_stats -> 7 + emit_stats -> 7; + _ -> 0 end. handle_call(flush, _From, State) -> @@ -303,7 +304,7 @@ handle_info(timeout, State) -> noreply(State); handle_info(emit_stats, State = #ch{stats_timer = StatsTimer}) -> - internal_emit_stats(State), + emit_stats(State), noreply([ensure_stats_timer], State#ch{ stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); @@ -323,11 +324,8 @@ handle_info({'EXIT', _Pid, Reason}, State) -> handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) -> ok = clear_permission_cache(), - rabbit_event:if_enabled(StatsTimer, - fun () -> - internal_emit_stats( - State, [{idle_since, now()}]) - end), + rabbit_event:if_enabled( + StatsTimer, fun () -> emit_stats(State, [{idle_since, now()}]) end), StatsTimer1 = rabbit_event:stop_stats_timer(StatsTimer), {hibernate, State#ch{stats_timer = StatsTimer1}}. @@ -1495,10 +1493,10 @@ update_measures(Type, QX, Inc, Measure) -> put({Type, QX}, orddict:store(Measure, Cur + Inc, Measures)). -internal_emit_stats(State) -> - internal_emit_stats(State, []). +emit_stats(State) -> + emit_stats(State, []). 
-internal_emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) -> +emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) -> CoarseStats = infos(?STATISTICS_KEYS, State), case rabbit_event:stats_level(StatsTimer) of coarse -> diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl index 3bc0e389..2dccc748 100644 --- a/src/rabbit_reader.erl +++ b/src/rabbit_reader.erl @@ -318,7 +318,7 @@ handle_other({'$gen_call', From, {info, Items}}, Deb, State) -> end), mainloop(Deb, State); handle_other(emit_stats, Deb, State) -> - mainloop(Deb, internal_emit_stats(State)); + mainloop(Deb, emit_stats(State)); handle_other({system, From, Request}, Deb, State = #v1{parent = Parent}) -> sys:handle_system_msg(Request, From, Parent, ?MODULE, Deb, State); handle_other(Other, _Deb, _State) -> @@ -686,7 +686,7 @@ handle_method0(#'connection.open'{virtual_host = VHostPath}, [{type, network} | infos(?CREATION_EVENT_KEYS, State1)]), rabbit_event:if_enabled(StatsTimer, - fun() -> internal_emit_stats(State1) end), + fun() -> emit_stats(State1) end), State1; handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) -> lists:foreach(fun rabbit_channel:shutdown/1, all_channels()), @@ -915,6 +915,6 @@ send_exception(State = #v1{connection = #connection{protocol = Protocol}}, State1#v1.sock, 0, CloseMethod, Protocol), State1. -internal_emit_stats(State = #v1{stats_timer = StatsTimer}) -> +emit_stats(State = #v1{stats_timer = StatsTimer}) -> rabbit_event:notify(connection_stats, infos(?STATISTICS_KEYS, State)), State#v1{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}. -- cgit v1.2.1 From 10f12a2b0d464dc6642ebd04eebb411468eba2c1 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Wed, 3 Aug 2011 12:26:34 +0100 Subject: Cosmetic --- src/rabbit_amqqueue_process.erl | 13 ++++++------- src/rabbit_channel.erl | 3 +-- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl index 0b4b62f5..05de48d6 100644 --- a/src/rabbit_amqqueue_process.erl +++ b/src/rabbit_amqqueue_process.erl @@ -182,9 +182,8 @@ declare(Recover, From, State1 = process_args(State#q{backing_queue_state = BQS}), rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State1)), - rabbit_event:if_enabled( - StatsTimer, - fun() -> emit_stats(State1) end), + rabbit_event:if_enabled(StatsTimer, + fun() -> emit_stats(State1) end), noreply(State1); Q1 -> {stop, normal, {existing, Q1}, State} end. @@ -299,10 +298,10 @@ ensure_expiry_timer(State = #q{expires = Expires}) -> State end. -ensure_stats_timer(State = #q { stats_timer = StatsTimer, - q = #amqqueue { pid = QPid }}) -> - State #q { stats_timer = rabbit_event:ensure_stats_timer( - StatsTimer, QPid, emit_stats) }. +ensure_stats_timer(State = #q{stats_timer = StatsTimer, + q = #amqqueue{pid = QPid}}) -> + State#q{stats_timer = rabbit_event:ensure_stats_timer( + StatsTimer, QPid, emit_stats)}. 
assert_invariant(#q{active_consumers = AC, backing_queue = BQ, backing_queue_state = BQS}) -> diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl index 72d6d33a..45f0032d 100644 --- a/src/rabbit_channel.erl +++ b/src/rabbit_channel.erl @@ -306,8 +306,7 @@ handle_info(timeout, State) -> handle_info(emit_stats, State = #ch{stats_timer = StatsTimer}) -> emit_stats(State), noreply([ensure_stats_timer], - State#ch{ - stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); + State#ch{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}); handle_info({'DOWN', MRef, process, QPid, Reason}, State = #ch{consumer_monitors = ConsumerMonitors}) -> -- cgit v1.2.1 From 6ea01578cabe462c32dda4479b40785e1a593230 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 3 Aug 2011 13:12:51 +0100 Subject: refactor mask_args --- src/rabbit_control.erl | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index ad0b6d0c..bb42efb6 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -75,8 +75,8 @@ start() -> Quiet = proplists:get_bool(?QUIET_OPT, Opts1), Node = proplists:get_value(?NODE_OPT, Opts1), rpc_call(Node, rabbit_control, log_anytime, - ["~p executing~nrabbitmqctl ~p~n", - [node(), mask_args([Command0 | Args])]]), + ["~p executing~nrabbitmqctl ~p ~p~n", + [node(), Command0, mask_args(Command0, Args)]]), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; false -> fun (Format, Args1) -> @@ -488,11 +488,9 @@ quit(Status) -> end. %% Mask passwords and other sensitive info before logging. -mask_args([]) -> - []; -mask_args(["add_user", Name, Password | Args]) -> - ["add_user", Name, "****" | mask_args(Args)]; -mask_args(["change_password", Name, Password | Args]) -> - ["change_password", Name, "****" | mask_args(Args)]; -mask_args([Arg | Args]) -> - [Arg | mask_args(Args)]. +mask_args("add_user", [Name, Password | Args]) -> + [Name, "****" | Args]; +mask_args("change_password", [Name, Password | Args]) -> + [Name, "****" | Args]; +mask_args(_, Args) -> + Args. -- cgit v1.2.1 From a92021dd7c1d79f6b273c3704b47e7c8e6b84358 Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 3 Aug 2011 13:39:31 +0100 Subject: abstract out with_local_io/1 Because it will be needed whenever a rabbitmqctl command causes the logger to print anything. --- src/rabbit_control.erl | 27 ++++++++++++--------------- src/rabbit_misc.erl | 11 ++++++++++- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index bb42efb6..4bc25998 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -17,7 +17,7 @@ -module(rabbit_control). -include("rabbit.hrl"). --export([start/0, stop/0, action/5, diagnostics/1, log_anytime/2]). +-export([start/0, stop/0, action/5, diagnostics/1, log_action/3]). -define(RPC_TIMEOUT, infinity). -define(WAIT_FOR_VM_ATTEMPTS, 5). @@ -51,7 +51,7 @@ -> 'ok'). -spec(diagnostics/1 :: (node()) -> [{string(), [any()]}]). -spec(usage/0 :: () -> no_return()). --spec(log_anytime/2 :: (string(), [term()]) -> ok). +-spec(log_action/3 :: (node(), string(), [term()]) -> ok). -endif. 
@@ -74,9 +74,7 @@ start() -> Command = list_to_atom(Command0), Quiet = proplists:get_bool(?QUIET_OPT, Opts1), Node = proplists:get_value(?NODE_OPT, Opts1), - rpc_call(Node, rabbit_control, log_anytime, - ["~p executing~nrabbitmqctl ~p ~p~n", - [node(), Command0, mask_args(Command0, Args)]]), + rpc_call(Node, rabbit_control, log_action, [node(), Command0, Args]), Inform = case Quiet of true -> fun (_Format, _Args1) -> ok end; false -> fun (Format, Args1) -> @@ -113,14 +111,6 @@ start() -> fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args). -%% Log an info item on a remote node regardless of whether rabbit is -%% running there or not: first change the group leader to that of the -%% remote node, then use the standard error logger, because rabbit's -%% might not be running. -log_anytime(Format, Args) -> - group_leader(whereis(user), self()), - error_logger:info_msg(Format, Args). - print_report(Node, {Descr, Module, InfoFun, KeysFun}) -> io:format("~s:~n", [Descr]), print_report0(Node, {Module, InfoFun, KeysFun}, []). @@ -487,10 +477,17 @@ quit(Status) -> {win32, _} -> init:stop(Status) end. +log_action(Node, Command, Args) -> + rabbit_misc:with_local_io( + fun () -> + error_logger:info_msg("~p executing~nrabbitmqctl ~p ~p~n", + [Node, Command, mask_args(Command, Args)]) + end). + %% Mask passwords and other sensitive info before logging. -mask_args("add_user", [Name, Password | Args]) -> +mask_args("add_user", [Name, _Password | Args]) -> [Name, "****" | Args]; -mask_args("change_password", [Name, Password | Args]) -> +mask_args("change_password", [Name, _Password | Args]) -> [Name, "****" | Args]; mask_args(_, Args) -> Args. diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 3bbfb1d7..0d77ffea 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -42,7 +42,7 @@ -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]). -export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]). -export([append_file/2, ensure_parent_dirs_exist/1]). --export([format_stderr/2]). +-export([format_stderr/2, with_local_io/1]). -export([start_applications/1, stop_applications/1]). -export([unfold/2, ceil/1, queue_fold/3]). -export([sort_field_table/1]). @@ -165,6 +165,7 @@ -spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). +-spec(with_local_io/1 :: (fun (() -> 'ok')) -> 'ok'). -spec(start_applications/1 :: ([atom()]) -> 'ok'). -spec(stop_applications/1 :: ([atom()]) -> 'ok'). -spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}). @@ -605,6 +606,14 @@ format_stderr(Fmt, Args) -> end, ok. +%% Execute Fun using the IO system of the local node (i.e. the node on +%% which the code is executing). +with_local_io(Fun) -> + GL = group_leader(), + group_leader(whereis(user), self()), + Fun(), + group_leader(GL, self()). 
+ manage_applications(Iterate, Do, Undo, SkipError, ErrorTag, Apps) -> Iterate(fun (App, Acc) -> case Do(App) of -- cgit v1.2.1 From feec24fd86118fb070bc4dd033b53378c91086bd Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Wed, 3 Aug 2011 13:39:46 +0100 Subject: cosmetic --- src/rabbit_control.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 4bc25998..2dd246d5 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -480,7 +480,7 @@ quit(Status) -> log_action(Node, Command, Args) -> rabbit_misc:with_local_io( fun () -> - error_logger:info_msg("~p executing~nrabbitmqctl ~p ~p~n", + error_logger:info_msg("~p executing~n rabbitmqctl ~p ~p~n", [Node, Command, mask_args(Command, Args)]) end). -- cgit v1.2.1 From efca72ac7bf49df1091ecc42d3e9d1751f5c5aaa Mon Sep 17 00:00:00 2001 From: Alexandru Scvortov Date: Thu, 4 Aug 2011 12:26:46 +0100 Subject: better argument formatting --- src/rabbit_control.erl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl index 2dd246d5..e8afed0c 100644 --- a/src/rabbit_control.erl +++ b/src/rabbit_control.erl @@ -480,8 +480,9 @@ quit(Status) -> log_action(Node, Command, Args) -> rabbit_misc:with_local_io( fun () -> - error_logger:info_msg("~p executing~n rabbitmqctl ~p ~p~n", - [Node, Command, mask_args(Command, Args)]) + error_logger:info_msg("~p executing~n rabbitmqctl ~s ~s~n", + [Node, Command, + format_args(mask_args(Command, Args))]) end). %% Mask passwords and other sensitive info before logging. @@ -491,3 +492,6 @@ mask_args("change_password", [Name, _Password | Args]) -> [Name, "****" | Args]; mask_args(_, Args) -> Args. + +format_args(Args) -> + string:join([io_lib:format("~p", [A]) || A <- Args], " "). -- cgit v1.2.1 From 2214f8a8ec047d32c78d8de94e2aa952d326e969 Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 4 Aug 2011 13:02:27 +0100 Subject: Return the value of Fun(), and make sure we always restore the group leader. --- src/rabbit_misc.erl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index 0d77ffea..a5c475b7 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -611,8 +611,11 @@ format_stderr(Fmt, Args) -> with_local_io(Fun) -> GL = group_leader(), group_leader(whereis(user), self()), - Fun(), - group_leader(GL, self()). + try + Fun() + after + group_leader(GL, self()) + end. manage_applications(Iterate, Do, Undo, SkipError, ErrorTag, Apps) -> Iterate(fun (App, Acc) -> -- cgit v1.2.1 From daa02553c4edb37e1f245528da543b764b90683b Mon Sep 17 00:00:00 2001 From: Simon MacMullen Date: Thu, 4 Aug 2011 13:35:11 +0100 Subject: More generic. --- src/rabbit_misc.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl index a5c475b7..b98dbd46 100644 --- a/src/rabbit_misc.erl +++ b/src/rabbit_misc.erl @@ -165,7 +165,7 @@ -spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()). -spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok'). -spec(format_stderr/2 :: (string(), [any()]) -> 'ok'). --spec(with_local_io/1 :: (fun (() -> 'ok')) -> 'ok'). +-spec(with_local_io/1 :: (fun (() -> A)) -> A). -spec(start_applications/1 :: ([atom()]) -> 'ok'). -spec(stop_applications/1 :: ([atom()]) -> 'ok'). -spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}). -- cgit v1.2.1
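
A note on the recurring timer pattern in the commits above. Several of them replace a timer:apply_after/4 call (which casts back into the process) with erlang:send_after/3 plus a handle_info/2 clause, and a follow-up commit simply ignores the return value of erlang:cancel_timer/1. The sketch below is illustrative only and is not part of the patch series: it uses the stock gen_server behaviour rather than rabbit's gen_server2 (so there is no prioritise_info/2 clause), and the module name, record and interval value are invented for the example.

-module(timer_pattern_sketch).
-behaviour(gen_server).

-export([start_link/0, stop/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

%% Interval chosen arbitrarily for the sketch; the real values live in
%% the modules patched above.
-define(SYNC_INTERVAL, 25).

-record(state, {sync_timer_ref}).

start_link() -> gen_server:start_link(?MODULE, [], []).

stop(Pid) -> gen_server:call(Pid, stop).

init([]) -> {ok, ensure_sync_timer(#state{sync_timer_ref = undefined})}.

%% erlang:send_after/3 delivers a plain message, so the timeout arrives
%% in handle_info/2 rather than as a cast.
handle_info(sync_timeout, State) ->
    %% ... periodic work would go here ...
    {noreply, ensure_sync_timer(State#state{sync_timer_ref = undefined})};
handle_info(_Other, State) ->
    {noreply, State}.

handle_call(stop, _From, State) ->
    {stop, normal, ok, stop_sync_timer(State)};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast(_Msg, State) -> {noreply, State}.

terminate(_Reason, _State) -> ok.

code_change(_OldVsn, State, _Extra) -> {ok, State}.

ensure_sync_timer(State = #state{sync_timer_ref = undefined}) ->
    TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync_timeout),
    State#state{sync_timer_ref = TRef};
ensure_sync_timer(State) ->
    State.

stop_sync_timer(State = #state{sync_timer_ref = undefined}) ->
    State;
stop_sync_timer(State = #state{sync_timer_ref = TRef}) ->
    %% The result is ignored on purpose: 'false' only means the timer
    %% has already fired and its message is (or was) in the mailbox.
    erlang:cancel_timer(TRef),
    State#state{sync_timer_ref = undefined}.

The apparent motivation, consistent with the diffs: erlang:send_after/3 is a BIF-backed timer that avoids the round trip through the timer server process used by timer:apply_after/4, and delivering a bare message removes the extra cast hop; ignoring the cancel_timer result is safe because a stray sync_timeout arriving after cancellation is harmless.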