summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorEmile Joubert <emile@rabbitmq.com>2011-09-26 21:37:25 +0100
committerEmile Joubert <emile@rabbitmq.com>2011-09-26 21:37:25 +0100
commita61f1f416d4ac032f68de25757f924e7e1664a72 (patch)
tree03bdd8658674c92321e355c261e7d5414648dbeb /src
parent8e975df2bcace8b4fe31bfe144e45e3e928cb944 (diff)
parentbc314f07b6059e8e4344ccac34a82d04dd1d1650 (diff)
downloadrabbitmq-server-a61f1f416d4ac032f68de25757f924e7e1664a72.tar.gz
Merged default into bug23764
Diffstat (limited to 'src')
-rw-r--r--src/delegate.erl6
-rw-r--r--src/delegate_sup.erl2
-rw-r--r--src/file_handle_cache.erl218
-rw-r--r--src/gatherer.erl2
-rw-r--r--src/gen_server2.erl64
-rw-r--r--src/gm.erl27
-rw-r--r--src/mirrored_supervisor.erl542
-rw-r--r--src/mirrored_supervisor_tests.erl309
-rw-r--r--src/pg2_fixed.erl400
-rw-r--r--src/priority_queue.erl36
-rw-r--r--src/rabbit.erl55
-rw-r--r--src/rabbit_access_control.erl3
-rw-r--r--src/rabbit_amqqueue.erl65
-rw-r--r--src/rabbit_amqqueue_process.erl628
-rw-r--r--src/rabbit_amqqueue_sup.erl14
-rw-r--r--src/rabbit_auth_backend_internal.erl30
-rw-r--r--src/rabbit_backing_queue_qc.erl4
-rw-r--r--src/rabbit_basic.erl74
-rw-r--r--src/rabbit_binding.erl31
-rw-r--r--src/rabbit_channel.erl531
-rw-r--r--src/rabbit_channel_sup.erl41
-rw-r--r--src/rabbit_client_sup.erl3
-rw-r--r--src/rabbit_command_assembler.erl4
-rw-r--r--src/rabbit_control.erl75
-rw-r--r--src/rabbit_direct.erl36
-rw-r--r--src/rabbit_error_logger.erl10
-rw-r--r--src/rabbit_error_logger_file_h.erl38
-rw-r--r--src/rabbit_event.erl16
-rw-r--r--src/rabbit_file.erl282
-rw-r--r--src/rabbit_guid.erl4
-rw-r--r--src/rabbit_limiter.erl146
-rw-r--r--src/rabbit_log.erl2
-rw-r--r--src/rabbit_mirror_queue_coordinator.erl82
-rw-r--r--src/rabbit_mirror_queue_master.erl32
-rw-r--r--src/rabbit_mirror_queue_misc.erl50
-rw-r--r--src/rabbit_mirror_queue_slave.erl190
-rw-r--r--src/rabbit_misc.erl212
-rw-r--r--src/rabbit_mnesia.erl257
-rw-r--r--src/rabbit_msg_store.erl101
-rw-r--r--src/rabbit_networking.erl47
-rw-r--r--src/rabbit_node_monitor.erl17
-rw-r--r--src/rabbit_prelaunch.erl41
-rw-r--r--src/rabbit_queue_index.erl50
-rw-r--r--src/rabbit_reader.erl68
-rw-r--r--src/rabbit_restartable_sup.erl10
-rw-r--r--src/rabbit_sasl_report_file_h.erl24
-rw-r--r--src/rabbit_sup.erl15
-rw-r--r--src/rabbit_tests.erl348
-rw-r--r--src/rabbit_tests_event_receiver.erl37
-rw-r--r--src/rabbit_trace.erl4
-rw-r--r--src/rabbit_upgrade.erl30
-rw-r--r--src/rabbit_upgrade_functions.erl7
-rw-r--r--src/rabbit_version.erl4
-rw-r--r--src/rabbit_vhost.erl4
-rw-r--r--src/rabbit_writer.erl3
-rw-r--r--src/supervisor2.erl29
-rw-r--r--src/tcp_acceptor_sup.erl8
-rw-r--r--src/tcp_listener.erl8
-rw-r--r--src/tcp_listener_sup.erl15
-rw-r--r--src/test_sup.erl12
-rw-r--r--src/vm_memory_monitor.erl4
-rw-r--r--src/worker_pool.erl1
-rw-r--r--src/worker_pool_sup.erl4
63 files changed, 3816 insertions, 1596 deletions
diff --git a/src/delegate.erl b/src/delegate.erl
index 17046201..edb4eba4 100644
--- a/src/delegate.erl
+++ b/src/delegate.erl
@@ -28,13 +28,13 @@
-ifdef(use_specs).
-spec(start_link/1 ::
- (non_neg_integer()) -> {'ok', pid()} | {'error', any()}).
--spec(invoke_no_result/2 ::
- (pid() | [pid()], fun ((pid()) -> any())) -> 'ok').
+ (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}).
-spec(invoke/2 ::
( pid(), fun ((pid()) -> A)) -> A;
([pid()], fun ((pid()) -> A)) -> {[{pid(), A}],
[{pid(), term()}]}).
+-spec(invoke_no_result/2 ::
+ (pid() | [pid()], fun ((pid()) -> any())) -> 'ok').
-endif.
diff --git a/src/delegate_sup.erl b/src/delegate_sup.erl
index fc693c7d..4c131a6c 100644
--- a/src/delegate_sup.erl
+++ b/src/delegate_sup.erl
@@ -28,7 +28,7 @@
-ifdef(use_specs).
--spec(start_link/1 :: (integer()) -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/1 :: (integer()) -> rabbit_types:ok_pid_or_error()).
-spec(count/1 :: ([node()]) -> integer()).
-endif.
diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl
index 61b08d49..6c3f1b5f 100644
--- a/src/file_handle_cache.erl
+++ b/src/file_handle_cache.erl
@@ -44,7 +44,6 @@
%% 4) You can find out what your 'real' offset is, and what your
%% 'virtual' offset is (i.e. where the hdl really is, and where it
%% would be after the write buffer is written out).
-%% 5) You can find out what the offset was when you last sync'd.
%%
%% There is also a server component which serves to limit the number
%% of open file descriptors. This is a hard limit: the server
@@ -121,37 +120,39 @@
%% do not need to worry about their handles being closed by the server
%% - reopening them when necessary is handled transparently.
%%
-%% The server also supports obtain and transfer. obtain/0 blocks until
-%% a file descriptor is available, at which point the requesting
-%% process is considered to 'own' one more descriptor. transfer/1
-%% transfers ownership of a file descriptor between processes. It is
-%% non-blocking. Obtain is used to obtain permission to accept file
-%% descriptors. Obtain has a lower limit, set by the ?OBTAIN_LIMIT/1
-%% macro. File handles can use the entire limit, but will be evicted
-%% by obtain calls up to the point at which no more obtain calls can
-%% be satisfied by the obtains limit. Thus there will always be some
-%% capacity available for file handles. Processes that use obtain are
-%% never asked to return them, and they are not managed in any way by
-%% the server. It is simply a mechanism to ensure that processes that
-%% need file descriptors such as sockets can do so in such a way that
-%% the overall number of open file descriptors is managed.
+%% The server also supports obtain, release and transfer. obtain/0
+%% blocks until a file descriptor is available, at which point the
+%% requesting process is considered to 'own' one more
+%% descriptor. release/0 is the inverse operation and releases a
+%% previously obtained descriptor. transfer/1 transfers ownership of a
+%% file descriptor between processes. It is non-blocking. Obtain is
+%% used to obtain permission to accept file descriptors. Obtain has a
+%% lower limit, set by the ?OBTAIN_LIMIT/1 macro. File handles can use
+%% the entire limit, but will be evicted by obtain calls up to the
+%% point at which no more obtain calls can be satisfied by the obtains
+%% limit. Thus there will always be some capacity available for file
+%% handles. Processes that use obtain are never asked to return them,
+%% and they are not managed in any way by the server. It is simply a
+%% mechanism to ensure that processes that need file descriptors such
+%% as sockets can do so in such a way that the overall number of open
+%% file descriptors is managed.
%%
%% The callers of register_callback/3, obtain/0, and the argument of
%% transfer/1 are monitored, reducing the count of handles in use
%% appropriately when the processes terminate.
--behaviour(gen_server).
+-behaviour(gen_server2).
-export([register_callback/3]).
-export([open/3, close/1, read/2, append/2, sync/1, position/2, truncate/1,
- last_sync_offset/1, current_virtual_offset/1, current_raw_offset/1,
- flush/1, copy/3, set_maximum_since_use/1, delete/1, clear/1]).
--export([obtain/0, transfer/1, set_limit/1, get_limit/0, info_keys/0, info/0,
- info/1]).
+ current_virtual_offset/1, current_raw_offset/1, flush/1, copy/3,
+ set_maximum_since_use/1, delete/1, clear/1]).
+-export([obtain/0, release/0, transfer/1, set_limit/1, get_limit/0, info_keys/0,
+ info/0, info/1]).
-export([ulimit/0]).
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+ terminate/2, code_change/3, prioritise_cast/2]).
-define(SERVER, ?MODULE).
-define(RESERVED_FOR_OTHERS, 100).
@@ -160,7 +161,8 @@
-define(FILE_HANDLES_CHECK_INTERVAL, 2000).
-define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)).
--define(CLIENT_ETS_TABLE, ?MODULE).
+-define(CLIENT_ETS_TABLE, file_handle_cache_client).
+-define(ELDERS_ETS_TABLE, file_handle_cache_elders).
%%----------------------------------------------------------------------------
@@ -172,7 +174,6 @@
-record(handle,
{ hdl,
offset,
- trusted_offset,
is_dirty,
write_buffer_size,
write_buffer_size_limit,
@@ -230,7 +231,7 @@
-spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok').
-spec(open/3 ::
- (string(), [any()],
+ (file:filename(), [any()],
[{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')}])
-> val_or_error(ref())).
-spec(close/1 :: (ref()) -> ok_or_error()).
@@ -240,23 +241,23 @@
-spec(sync/1 :: (ref()) -> ok_or_error()).
-spec(position/2 :: (ref(), position()) -> val_or_error(offset())).
-spec(truncate/1 :: (ref()) -> ok_or_error()).
--spec(last_sync_offset/1 :: (ref()) -> val_or_error(offset())).
-spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())).
-spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())).
-spec(flush/1 :: (ref()) -> ok_or_error()).
-spec(copy/3 :: (ref(), ref(), non_neg_integer()) ->
val_or_error(non_neg_integer())).
--spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok').
-spec(delete/1 :: (ref()) -> ok_or_error()).
-spec(clear/1 :: (ref()) -> ok_or_error()).
+-spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok').
-spec(obtain/0 :: () -> 'ok').
+-spec(release/0 :: () -> 'ok').
-spec(transfer/1 :: (pid()) -> 'ok').
-spec(set_limit/1 :: (non_neg_integer()) -> 'ok').
-spec(get_limit/0 :: () -> non_neg_integer()).
--spec(info_keys/0 :: () -> [atom()]).
--spec(info/0 :: () -> [{atom(), any()}]).
--spec(info/1 :: ([atom()]) -> [{atom(), any()}]).
--spec(ulimit/0 :: () -> 'infinity' | 'unknown' | non_neg_integer()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(info/0 :: () -> rabbit_types:infos()).
+-spec(info/1 :: ([atom()]) -> rabbit_types:infos()).
+-spec(ulimit/0 :: () -> 'unknown' | non_neg_integer()).
-endif.
@@ -268,11 +269,11 @@
%%----------------------------------------------------------------------------
start_link() ->
- gen_server:start_link({local, ?SERVER}, ?MODULE, [], [{timeout, infinity}]).
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [], [{timeout, infinity}]).
register_callback(M, F, A)
when is_atom(M) andalso is_atom(F) andalso is_list(A) ->
- gen_server:cast(?SERVER, {register_callback, self(), {M, F, A}}).
+ gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
open(Path, Mode, Options) ->
Path1 = filename:absname(Path),
@@ -320,7 +321,7 @@ read(Ref, Count) ->
fun ([#handle { is_read = false }]) ->
{error, not_open_for_reading};
([Handle = #handle { hdl = Hdl, offset = Offset }]) ->
- case file:read(Hdl, Count) of
+ case prim_file:read(Hdl, Count) of
{ok, Data} = Obj -> Offset1 = Offset + iolist_size(Data),
{Obj,
[Handle #handle { offset = Offset1 }]};
@@ -340,7 +341,7 @@ append(Ref, Data) ->
write_buffer_size_limit = 0,
at_eof = true } = Handle1} ->
Offset1 = Offset + iolist_size(Data),
- {file:write(Hdl, Data),
+ {prim_file:write(Hdl, Data),
[Handle1 #handle { is_dirty = true, offset = Offset1 }]};
{{ok, _Offset}, #handle { write_buffer = WriteBuffer,
write_buffer_size = Size,
@@ -365,11 +366,10 @@ sync(Ref) ->
[Ref],
fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
ok;
- ([Handle = #handle { hdl = Hdl, offset = Offset,
+ ([Handle = #handle { hdl = Hdl,
is_dirty = true, write_buffer = [] }]) ->
- case file:sync(Hdl) of
- ok -> {ok, [Handle #handle { trusted_offset = Offset,
- is_dirty = false }]};
+ case prim_file:sync(Hdl) of
+ ok -> {ok, [Handle #handle { is_dirty = false }]};
Error -> {Error, [Handle]}
end
end).
@@ -384,21 +384,13 @@ position(Ref, NewOffset) ->
truncate(Ref) ->
with_flushed_handles(
[Ref],
- fun ([Handle1 = #handle { hdl = Hdl, offset = Offset,
- trusted_offset = TOffset }]) ->
- case file:truncate(Hdl) of
- ok -> TOffset1 = lists:min([Offset, TOffset]),
- {ok, [Handle1 #handle { trusted_offset = TOffset1,
- at_eof = true }]};
+ fun ([Handle1 = #handle { hdl = Hdl }]) ->
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
Error -> {Error, [Handle1]}
end
end).
-last_sync_offset(Ref) ->
- with_handles([Ref], fun ([#handle { trusted_offset = TOffset }]) ->
- {ok, TOffset}
- end).
-
current_virtual_offset(Ref) ->
with_handles([Ref], fun ([#handle { at_eof = true, is_write = true,
offset = Offset,
@@ -420,7 +412,7 @@ copy(Src, Dest, Count) ->
fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset },
DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }]
) ->
- case file:copy(SHdl, DHdl, Count) of
+ case prim_file:copy(SHdl, DHdl, Count) of
{ok, Count1} = Result1 ->
{Result1,
[SHandle #handle { offset = SOffset + Count1 },
@@ -440,7 +432,7 @@ delete(Ref) ->
Handle = #handle { path = Path } ->
case hard_close(Handle #handle { is_dirty = false,
write_buffer = [] }) of
- ok -> file:delete(Path);
+ ok -> prim_file:delete(Path);
{Error, Handle1} -> put_handle(Ref, Handle1),
Error
end
@@ -455,9 +447,8 @@ clear(Ref) ->
case maybe_seek(bof, Handle #handle { write_buffer = [],
write_buffer_size = 0 }) of
{{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
- case file:truncate(Hdl) of
- ok -> {ok, [Handle1 #handle {trusted_offset = 0,
- at_eof = true }]};
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
Error -> {Error, [Handle1]}
end;
{{error, _} = Error, Handle1} ->
@@ -483,21 +474,28 @@ set_maximum_since_use(MaximumAge) ->
end.
obtain() ->
- gen_server:call(?SERVER, {obtain, self()}, infinity).
+ %% If the FHC isn't running, obtains succeed immediately.
+ case whereis(?SERVER) of
+ undefined -> ok;
+ _ -> gen_server2:call(?SERVER, {obtain, self()}, infinity)
+ end.
+
+release() ->
+ gen_server2:cast(?SERVER, {release, self()}).
transfer(Pid) ->
- gen_server:cast(?SERVER, {transfer, self(), Pid}).
+ gen_server2:cast(?SERVER, {transfer, self(), Pid}).
set_limit(Limit) ->
- gen_server:call(?SERVER, {set_limit, Limit}, infinity).
+ gen_server2:call(?SERVER, {set_limit, Limit}, infinity).
get_limit() ->
- gen_server:call(?SERVER, get_limit, infinity).
+ gen_server2:call(?SERVER, get_limit, infinity).
info_keys() -> ?INFO_KEYS.
info() -> info(?INFO_KEYS).
-info(Items) -> gen_server:call(?SERVER, {info, Items}, infinity).
+info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
%%----------------------------------------------------------------------------
%% Internal functions
@@ -551,8 +549,8 @@ get_or_reopen(RefNewOrReopens) ->
{ok, [Handle || {_Ref, Handle} <- OpenHdls]};
{OpenHdls, ClosedHdls} ->
Oldest = oldest(get_age_tree(), fun () -> now() end),
- case gen_server:call(?SERVER, {open, self(), length(ClosedHdls),
- Oldest}, infinity) of
+ case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
+ Oldest}, infinity) of
ok ->
case reopen(ClosedHdls) of
{ok, RefHdls} -> sort_handles(RefNewOrReopens,
@@ -579,24 +577,23 @@ reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed,
offset = Offset,
last_used_at = undefined }} |
RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
- case file:open(Path, case NewOrReopen of
- new -> Mode;
- reopen -> [read | Mode]
- end) of
+ case prim_file:open(Path, case NewOrReopen of
+ new -> Mode;
+ reopen -> [read | Mode]
+ end) of
{ok, Hdl} ->
Now = now(),
- {{ok, Offset1}, Handle1} =
+ {{ok, _Offset}, Handle1} =
maybe_seek(Offset, Handle #handle { hdl = Hdl,
offset = 0,
last_used_at = Now }),
- Handle2 = Handle1 #handle { trusted_offset = Offset1 },
- put({Ref, fhc_handle}, Handle2),
+ put({Ref, fhc_handle}, Handle1),
reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree),
- [{Ref, Handle2} | RefHdls]);
+ [{Ref, Handle1} | RefHdls]);
Error ->
%% NB: none of the handles in ToOpen are in the age tree
Oldest = oldest(Tree, fun () -> undefined end),
- [gen_server:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
+ [gen_server2:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
put_age_tree(Tree),
Error
end.
@@ -645,7 +642,7 @@ age_tree_delete(Then) ->
fun (Tree) ->
Tree1 = gb_trees:delete_any(Then, Tree),
Oldest = oldest(Tree1, fun () -> undefined end),
- gen_server:cast(?SERVER, {close, self(), Oldest}),
+ gen_server2:cast(?SERVER, {close, self(), Oldest}),
Tree1
end).
@@ -655,7 +652,7 @@ age_tree_change() ->
case gb_trees:is_empty(Tree) of
true -> Tree;
false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
- gen_server:cast(?SERVER, {update, self(), Oldest})
+ gen_server2:cast(?SERVER, {update, self(), Oldest})
end,
Tree
end).
@@ -677,7 +674,6 @@ new_closed_handle(Path, Mode, Options) ->
Ref = make_ref(),
put({Ref, fhc_handle}, #handle { hdl = closed,
offset = 0,
- trusted_offset = 0,
is_dirty = false,
write_buffer_size = 0,
write_buffer_size_limit = WriteBufferSize,
@@ -705,17 +701,15 @@ soft_close(Handle = #handle { hdl = closed }) ->
soft_close(Handle) ->
case write_buffer(Handle) of
{ok, #handle { hdl = Hdl,
- offset = Offset,
is_dirty = IsDirty,
last_used_at = Then } = Handle1 } ->
ok = case IsDirty of
- true -> file:sync(Hdl);
+ true -> prim_file:sync(Hdl);
false -> ok
end,
- ok = file:close(Hdl),
+ ok = prim_file:close(Hdl),
age_tree_delete(Then),
{ok, Handle1 #handle { hdl = closed,
- trusted_offset = Offset,
is_dirty = false,
last_used_at = undefined }};
{_Error, _Handle} = Result ->
@@ -748,7 +742,7 @@ maybe_seek(NewOffset, Handle = #handle { hdl = Hdl, offset = Offset,
at_eof = AtEoF }) ->
{AtEoF1, NeedsSeek} = needs_seek(AtEoF, Offset, NewOffset),
case (case NeedsSeek of
- true -> file:position(Hdl, NewOffset);
+ true -> prim_file:position(Hdl, NewOffset);
false -> {ok, Offset}
end) of
{ok, Offset1} = Result ->
@@ -785,7 +779,7 @@ write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
write_buffer = WriteBuffer,
write_buffer_size = DataSize,
at_eof = true }) ->
- case file:write(Hdl, lists:reverse(WriteBuffer)) of
+ case prim_file:write(Hdl, lists:reverse(WriteBuffer)) of
ok ->
Offset1 = Offset + DataSize,
{ok, Handle #handle { offset = Offset1, is_dirty = true,
@@ -801,7 +795,7 @@ i(obtain_limit, #fhc_state{obtain_limit = Limit}) -> Limit;
i(Item, _) -> throw({bad_argument, Item}).
%%----------------------------------------------------------------------------
-%% gen_server callbacks
+%% gen_server2 callbacks
%%----------------------------------------------------------------------------
init([]) ->
@@ -811,7 +805,6 @@ init([]) ->
Watermark;
_ ->
case ulimit() of
- infinity -> infinity;
unknown -> ?FILE_HANDLES_LIMIT_OTHER;
Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS])
end
@@ -820,7 +813,8 @@ init([]) ->
error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
[Limit, ObtainLimit]),
Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
- {ok, #fhc_state { elders = dict:new(),
+ Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
+ {ok, #fhc_state { elders = Elders,
limit = Limit,
open_count = 0,
open_pending = pending_new(),
@@ -830,34 +824,39 @@ init([]) ->
clients = Clients,
timer_ref = undefined }}.
+prioritise_cast(Msg, _State) ->
+ case Msg of
+ {release, _} -> 5;
+ _ -> 0
+ end.
+
handle_call({open, Pid, Requested, EldestUnusedSince}, From,
State = #fhc_state { open_count = Count,
open_pending = Pending,
elders = Elders,
clients = Clients })
when EldestUnusedSince =/= undefined ->
- Elders1 = dict:store(Pid, EldestUnusedSince, Elders),
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
Item = #pending { kind = open,
pid = Pid,
requested = Requested,
from = From },
ok = track_client(Pid, Clients),
- State1 = State #fhc_state { elders = Elders1 },
- case needs_reduce(State1 #fhc_state { open_count = Count + Requested }) of
+ case needs_reduce(State #fhc_state { open_count = Count + Requested }) of
true -> case ets:lookup(Clients, Pid) of
[#cstate { opened = 0 }] ->
true = ets:update_element(
Clients, Pid, {#cstate.blocked, true}),
{noreply,
- reduce(State1 #fhc_state {
+ reduce(State #fhc_state {
open_pending = pending_in(Item, Pending) })};
[#cstate { opened = Opened }] ->
true = ets:update_element(
Clients, Pid,
{#cstate.pending_closes, Opened}),
- {reply, close, State1}
+ {reply, close, State}
end;
- false -> {noreply, run_pending_item(Item, State1)}
+ false -> {noreply, run_pending_item(Item, State)}
end;
handle_call({obtain, Pid}, From, State = #fhc_state { obtain_count = Count,
@@ -905,30 +904,33 @@ handle_cast({register_callback, Pid, MFA},
handle_cast({update, Pid, EldestUnusedSince},
State = #fhc_state { elders = Elders })
when EldestUnusedSince =/= undefined ->
- Elders1 = dict:store(Pid, EldestUnusedSince, Elders),
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
%% don't call maybe_reduce from here otherwise we can create a
%% storm of messages
- {noreply, State #fhc_state { elders = Elders1 }};
+ {noreply, State};
+
+handle_cast({release, Pid}, State) ->
+ {noreply, adjust_alarm(State, process_pending(
+ update_counts(obtain, Pid, -1, State)))};
handle_cast({close, Pid, EldestUnusedSince},
State = #fhc_state { elders = Elders, clients = Clients }) ->
- Elders1 = case EldestUnusedSince of
- undefined -> dict:erase(Pid, Elders);
- _ -> dict:store(Pid, EldestUnusedSince, Elders)
- end,
+ true = case EldestUnusedSince of
+ undefined -> ets:delete(Elders, Pid);
+ _ -> ets:insert(Elders, {Pid, EldestUnusedSince})
+ end,
ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}),
{noreply, adjust_alarm(State, process_pending(
- update_counts(open, Pid, -1,
- State #fhc_state { elders = Elders1 })))};
+ update_counts(open, Pid, -1, State)))};
handle_cast({transfer, FromPid, ToPid}, State) ->
ok = track_client(ToPid, State#fhc_state.clients),
{noreply, process_pending(
update_counts(obtain, ToPid, +1,
- update_counts(obtain, FromPid, -1, State)))};
+ update_counts(obtain, FromPid, -1, State)))}.
-handle_cast(check_counts, State) ->
- {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })}.
+handle_info(check_counts, State) ->
+ {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
handle_info({'DOWN', _MRef, process, Pid, _Reason},
State = #fhc_state { elders = Elders,
@@ -940,6 +942,7 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
[#cstate { opened = Opened, obtained = Obtained }] =
ets:lookup(Clients, Pid),
true = ets:delete(Clients, Pid),
+ true = ets:delete(Elders, Pid),
FilterFun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end,
{noreply, adjust_alarm(
State,
@@ -948,11 +951,12 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
open_count = OpenCount - Opened,
open_pending = filter_pending(FilterFun, OpenPending),
obtain_count = ObtainCount - Obtained,
- obtain_pending = filter_pending(FilterFun, ObtainPending),
- elders = dict:erase(Pid, Elders) }))}.
+ obtain_pending = filter_pending(FilterFun, ObtainPending) }))}.
-terminate(_Reason, State = #fhc_state { clients = Clients }) ->
+terminate(_Reason, State = #fhc_state { clients = Clients,
+ elders = Elders }) ->
ets:delete(Clients),
+ ets:delete(Elders),
State.
code_change(_OldVsn, State, _Extra) ->
@@ -1064,7 +1068,7 @@ run_pending_item(#pending { kind = Kind,
requested = Requested,
from = From },
State = #fhc_state { clients = Clients }) ->
- gen_server:reply(From, ok),
+ gen_server2:reply(From, ok),
true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
update_counts(Kind, Pid, Requested, State).
@@ -1108,7 +1112,7 @@ reduce(State = #fhc_state { open_pending = OpenPending,
timer_ref = TRef }) ->
Now = now(),
{CStates, Sum, ClientCount} =
- dict:fold(fun (Pid, Eldest, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
+ ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
[#cstate { pending_closes = PendingCloses,
opened = Opened,
blocked = Blocked } = CState] =
@@ -1133,9 +1137,9 @@ reduce(State = #fhc_state { open_pending = OpenPending,
end
end,
case TRef of
- undefined -> {ok, TRef1} = timer:apply_after(
- ?FILE_HANDLES_CHECK_INTERVAL,
- gen_server, cast, [?SERVER, check_counts]),
+ undefined -> TRef1 = erlang:send_after(
+ ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER,
+ check_counts),
State #fhc_state { timer_ref = TRef1 };
_ -> State
end.
diff --git a/src/gatherer.erl b/src/gatherer.erl
index aa43e9a9..fe976b50 100644
--- a/src/gatherer.erl
+++ b/src/gatherer.erl
@@ -27,7 +27,7 @@
-ifdef(use_specs).
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(stop/1 :: (pid()) -> 'ok').
-spec(fork/1 :: (pid()) -> 'ok').
-spec(finish/1 :: (pid()) -> 'ok').
diff --git a/src/gen_server2.erl b/src/gen_server2.erl
index 60471181..ab6c4e64 100644
--- a/src/gen_server2.erl
+++ b/src/gen_server2.erl
@@ -598,41 +598,35 @@ adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
CurrentTO1 = Base + Extra,
{backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
-in({'$gen_cast', Msg}, GS2State = #gs2_state { prioritise_cast = PC,
- queue = Queue }) ->
- GS2State #gs2_state { queue = priority_queue:in(
- {'$gen_cast', Msg},
- PC(Msg, GS2State), Queue) };
-in({'$gen_call', From, Msg}, GS2State = #gs2_state { prioritise_call = PC,
- queue = Queue }) ->
- GS2State #gs2_state { queue = priority_queue:in(
- {'$gen_call', From, Msg},
- PC(Msg, From, GS2State), Queue) };
-in(Input, GS2State = #gs2_state { prioritise_info = PI, queue = Queue }) ->
- GS2State #gs2_state { queue = priority_queue:in(
- Input, PI(Input, GS2State), Queue) }.
-
-process_msg(Msg,
- GS2State = #gs2_state { parent = Parent,
- name = Name,
- debug = Debug }) ->
- case Msg of
- {system, From, Req} ->
- sys:handle_system_msg(
- Req, From, Parent, ?MODULE, Debug,
- GS2State);
- %% gen_server puts Hib on the end as the 7th arg, but that
- %% version of the function seems not to be documented so
- %% leaving out for now.
- {'EXIT', Parent, Reason} ->
- terminate(Reason, Msg, GS2State);
- _Msg when Debug =:= [] ->
- handle_msg(Msg, GS2State);
- _Msg ->
- Debug1 = sys:handle_debug(Debug, fun print_event/3,
- Name, {in, Msg}),
- handle_msg(Msg, GS2State #gs2_state { debug = Debug1 })
- end.
+in({'$gen_cast', Msg} = Input,
+ GS2State = #gs2_state { prioritise_cast = PC }) ->
+ in(Input, PC(Msg, GS2State), GS2State);
+in({'$gen_call', From, Msg} = Input,
+ GS2State = #gs2_state { prioritise_call = PC }) ->
+ in(Input, PC(Msg, From, GS2State), GS2State);
+in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
+ in(Input, infinity, GS2State);
+in({system, _From, _Req} = Input, GS2State) ->
+ in(Input, infinity, GS2State);
+in(Input, GS2State = #gs2_state { prioritise_info = PI }) ->
+ in(Input, PI(Input, GS2State), GS2State).
+
+in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
+ GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }.
+
+process_msg({system, From, Req},
+ GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
+ %% gen_server puts Hib on the end as the 7th arg, but that version
+ %% of the fun seems not to be documented so leaving out for now.
+ sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State);
+process_msg({'EXIT', Parent, Reason} = Msg,
+ GS2State = #gs2_state { parent = Parent }) ->
+ terminate(Reason, Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
+ handle_msg(Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
+ Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
+ handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
%%% ---------------------------------------------------
%%% Send/recive functions
diff --git a/src/gm.erl b/src/gm.erl
index 8b7dc70c..8c838a70 100644
--- a/src/gm.erl
+++ b/src/gm.erl
@@ -376,11 +376,11 @@
confirmed_broadcast/2, group_members/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3, prioritise_cast/2, prioritise_info/2]).
+ code_change/3, prioritise_info/2]).
-export([behaviour_info/1]).
--export([table_definitions/0, flush/1]).
+-export([table_definitions/0]).
-define(GROUP_TABLE, gm_group).
-define(HIBERNATE_AFTER_MIN, 1000).
@@ -422,9 +422,9 @@
-type(group_name() :: any()).
--spec(create_tables/0 :: () -> 'ok').
+-spec(create_tables/0 :: () -> 'ok' | {'aborted', any()}).
-spec(start_link/3 :: (group_name(), atom(), any()) ->
- {'ok', pid()} | {'error', any()}).
+ rabbit_types:ok_pid_or_error()).
-spec(leave/1 :: (pid()) -> 'ok').
-spec(broadcast/2 :: (pid(), any()) -> 'ok').
-spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok').
@@ -511,9 +511,6 @@ confirmed_broadcast(Server, Msg) ->
group_members(Server) ->
gen_server2:call(Server, group_members, infinity).
-flush(Server) ->
- gen_server2:cast(Server, flush).
-
init([GroupName, Module, Args]) ->
{MegaSecs, Secs, MicroSecs} = now(),
@@ -629,12 +626,12 @@ handle_cast(join, State = #state { self = Self,
{Module:joined(Args, all_known_members(View)), State1});
handle_cast(leave, State) ->
- {stop, normal, State};
+ {stop, normal, State}.
-handle_cast(flush, State) ->
- noreply(
- flush_broadcast_buffer(State #state { broadcast_timer = undefined })).
+handle_info(flush, State) ->
+ noreply(
+ flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
handle_info({'DOWN', MRef, process, _Pid, _Reason},
State = #state { self = Self,
@@ -684,9 +681,7 @@ terminate(Reason, State = #state { module = Module,
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-prioritise_cast(flush, _State) -> 1;
-prioritise_cast(_ , _State) -> 0.
-
+prioritise_info(flush, _State) -> 1;
prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _State) -> 1;
prioritise_info(_ , _State) -> 0.
@@ -808,10 +803,10 @@ ensure_broadcast_timer(State = #state { broadcast_buffer = [],
State;
ensure_broadcast_timer(State = #state { broadcast_buffer = [],
broadcast_timer = TRef }) ->
- timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State #state { broadcast_timer = undefined };
ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
- {ok, TRef} = timer:apply_after(?BROADCAST_TIMER, ?MODULE, flush, [self()]),
+ TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
State #state { broadcast_timer = TRef };
ensure_broadcast_timer(State) ->
State.
diff --git a/src/mirrored_supervisor.erl b/src/mirrored_supervisor.erl
new file mode 100644
index 00000000..8dfe39f8
--- /dev/null
+++ b/src/mirrored_supervisor.erl
@@ -0,0 +1,542 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2011 VMware, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor).
+
+%% Mirrored Supervisor
+%% ===================
+%%
+%% This module implements a new type of supervisor. It acts like a
+%% normal supervisor, but at creation time you also provide the name
+%% of a process group to join. All the supervisors within the
+%% process group act like a single large distributed supervisor:
+%%
+%% * A process with a given child_id will only exist on one
+%% supervisor within the group.
+%%
+%% * If one supervisor fails, children may migrate to surviving
+%% supervisors within the group.
+%%
+%% In almost all cases you will want to use the module name for the
+%% process group. Using multiple process groups with the same module
+%% name is supported. Having multiple module names for the same
+%% process group will lead to undefined behaviour.
+%%
+%% Motivation
+%% ----------
+%%
+%% Sometimes you have processes which:
+%%
+%% * Only need to exist once per cluster.
+%%
+%% * Do not contain much state (or can reconstruct their state easily).
+%%
+%% * Need to be restarted elsewhere should they be running on a node
+%% which fails.
+%%
+%% By creating a mirrored supervisor group with one supervisor on
+%% each node, that's what you get.
+%%
+%%
+%% API use
+%% -------
+%%
+%% This is basically the same as for supervisor, except that:
+%%
+%% 1) start_link(Module, Args) becomes
+%% start_link(Group, Module, Args).
+%%
+%% 2) start_link({local, Name}, Module, Args) becomes
+%% start_link({local, Name}, Group, Module, Args).
+%%
+%% 3) start_link({global, Name}, Module, Args) is not available.
+%%
+%% 4) The restart strategy simple_one_for_one is not available.
+%%
+%% 5) Mnesia is used to hold global state. At some point your
+%% application should invoke create_tables() (or table_definitions()
+%% if it wants to manage table creation itself).
+%%
+%% Internals
+%% ---------
+%%
+%% Each mirrored_supervisor consists of three processes - the overall
+%% supervisor, the delegate supervisor and the mirroring server. The
+%% overall supervisor supervises the other two processes. Its pid is
+%% the one returned from start_link; the pids of the other two
+%% processes are effectively hidden in the API.
+%%
+%% The delegate supervisor is in charge of supervising all the child
+%% processes that are added to the supervisor as usual.
+%%
+%% The mirroring server intercepts calls to the supervisor API
+%% (directed at the overall supervisor), does any special handling,
+%% and forwards everything to the delegate supervisor.
+%%
+%% This module implements all three, hence init/1 is somewhat overloaded.
+%%
+%% The mirroring server creates and joins a process group on
+%% startup. It monitors all the existing members of this group, and
+%% broadcasts a "hello" message to them so that they can monitor it in
+%% turn. When it receives a 'DOWN' message, it checks to see if it's
+%% the "first" server in the group and restarts all the child
+%% processes from the dead supervisor if so.
+%%
+%% In the future we might load balance this.
+%%
+%% Startup is slightly fiddly. The mirroring server needs to know the
+%% Pid of the overall supervisor, but we don't have that until it has
+%% started. Therefore we set this after the fact. We also start any
+%% children we found in Module:init() at this point, since starting
+%% children requires knowing the overall supervisor pid.
+
+-define(SUPERVISOR, supervisor2).
+-define(GEN_SERVER, gen_server2).
+-define(PG2, pg2_fixed).
+
+-define(TABLE, mirrored_sup_childspec).
+-define(TABLE_DEF,
+ {?TABLE,
+ [{record_name, mirrored_sup_childspec},
+ {type, ordered_set},
+ {attributes, record_info(fields, mirrored_sup_childspec)}]}).
+-define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
+
+-export([start_link/3, start_link/4,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1, check_childspecs/1]).
+
+-export([behaviour_info/1]).
+
+-behaviour(?GEN_SERVER).
+-behaviour(?SUPERVISOR).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-export([start_internal/2]).
+-export([create_tables/0, table_definitions/0]).
+
+-record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
+
+-record(state, {overall,
+ delegate,
+ group,
+ initial_childspecs}).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type child() :: pid() | 'undefined'.
+-type child_id() :: term().
+-type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | 'undefined'}.
+-type modules() :: [module()] | 'dynamic'.
+-type restart() :: 'permanent' | 'transient' | 'temporary'.
+-type shutdown() :: 'brutal_kill' | timeout().
+-type worker() :: 'worker' | 'supervisor'.
+-type sup_name() :: {'local', Name :: atom()} | {'global', Name :: atom()}.
+-type sup_ref() :: (Name :: atom())
+ | {Name :: atom(), Node :: node()}
+ | {'global', Name :: atom()}
+ | pid().
+-type child_spec() :: {Id :: child_id(),
+ StartFunc :: mfargs(),
+ Restart :: restart(),
+ Shutdown :: shutdown(),
+ Type :: worker(),
+ Modules :: modules()}.
+
+-type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-type startchild_err() :: 'already_present'
+ | {'already_started', Child :: child()} | term().
+-type startchild_ret() :: {'ok', Child :: child()}
+ | {'ok', Child :: child(), Info :: term()}
+ | {'error', startchild_err()}.
+
+-type group_name() :: any().
+
+-spec start_link(GroupName, Module, Args) -> startlink_ret() when
+ GroupName :: group_name(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_link(SupName, GroupName, Module, Args) -> startlink_ret() when
+ SupName :: sup_name(),
+ GroupName :: group_name(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_child(SupRef, ChildSpec) -> startchild_ret() when
+ SupRef :: sup_ref(),
+ ChildSpec :: child_spec() | (List :: [term()]).
+
+-spec restart_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: {'ok', Child :: child()}
+ | {'ok', Child :: child(), Info :: term()}
+ | {'error', Error},
+ Error :: 'running' | 'not_found' | 'simple_one_for_one' | term().
+
+-spec delete_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'running' | 'not_found' | 'simple_one_for_one'.
+
+-spec terminate_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'not_found' | 'simple_one_for_one'.
+
+-spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
+ SupRef :: sup_ref(),
+ Id :: child_id() | 'undefined',
+ Child :: child(),
+ Type :: worker(),
+ Modules :: modules().
+
+-spec check_childspecs(ChildSpecs) -> Result when
+ ChildSpecs :: [child_spec()],
+ Result :: 'ok' | {'error', Error :: term()}.
+
+-spec start_internal(Group, ChildSpecs) -> Result when
+ Group :: group_name(),
+ ChildSpecs :: [child_spec()],
+ Result :: startlink_ret().
+
+-spec create_tables() -> Result when
+ Result :: 'ok'.
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link(Group, Mod, Args) ->
+ start_link0([], Group, init(Mod, Args)).
+
+start_link({local, SupName}, Group, Mod, Args) ->
+ start_link0([{local, SupName}], Group, init(Mod, Args));
+
+start_link({global, _SupName}, _Group, _Mod, _Args) ->
+ erlang:error(badarg).
+
+start_link0(Prefix, Group, Init) ->
+ case apply(?SUPERVISOR, start_link,
+ Prefix ++ [?MODULE, {overall, Group, Init}]) of
+ {ok, Pid} -> call(Pid, {init, Pid}),
+ {ok, Pid};
+ Other -> Other
+ end.
+
+init(Mod, Args) ->
+ case Mod:init(Args) of
+ {ok, {{Bad, _, _}, _ChildSpecs}} when
+ Bad =:= simple_one_for_one orelse
+ Bad =:= simple_one_for_one_terminate -> erlang:error(badarg);
+ Init -> Init
+ end.
+
+start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
+delete_child(Sup, Id) -> find_call(Sup, Id, {delete_child, Id}).
+restart_child(Sup, Id) -> find_call(Sup, Id, {msg, restart_child, [Id]}).
+terminate_child(Sup, Id) -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
+which_children(Sup) -> fold(which_children, Sup, fun lists:append/2).
+count_children(Sup) -> fold(count_children, Sup, fun add_proplists/2).
+check_childspecs(Specs) -> ?SUPERVISOR:check_childspecs(Specs).
+
+behaviour_info(callbacks) -> [{init,1}];
+behaviour_info(_Other) -> undefined.
+
+call(Sup, Msg) ->
+ ?GEN_SERVER:call(child(Sup, mirroring), Msg, infinity).
+
+find_call(Sup, Id, Msg) ->
+ Group = call(Sup, group),
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
+ key = {Group, Id},
+ _ = '_'},
+ %% If we did this inside a tx we could still have failover
+ %% immediately after the tx - we can't be 100% certain here. So we
+ %% may as well dirty_select.
+ case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
+ [Mirror] -> ?GEN_SERVER:call(Mirror, Msg, infinity);
+ [] -> {error, not_found}
+ end.
+
+fold(FunAtom, Sup, AggFun) ->
+ Group = call(Sup, group),
+ lists:foldl(AggFun, [],
+ [apply(?SUPERVISOR, FunAtom, [D]) ||
+ M <- ?PG2:get_members(Group),
+ D <- [?GEN_SERVER:call(M, delegate_supervisor, infinity)]]).
+
+child(Sup, Id) ->
+ [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
+ Id1 =:= Id],
+ Pid.
+
+%%----------------------------------------------------------------------------
+
+start_internal(Group, ChildSpecs) ->
+ ?GEN_SERVER:start_link(?MODULE, {mirroring, Group, ChildSpecs},
+ [{timeout, infinity}]).
+
+%%----------------------------------------------------------------------------
+
+init({overall, Group, Init}) ->
+ case Init of
+ {ok, {Restart, ChildSpecs}} ->
+ Delegate = {delegate, {?SUPERVISOR, start_link,
+ [?MODULE, {delegate, Restart}]},
+ temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
+ Mirroring = {mirroring, {?MODULE, start_internal,
+ [Group, ChildSpecs]},
+ permanent, 16#ffffffff, worker, [?MODULE]},
+ %% Important: Delegate MUST start before Mirroring so that
+ %% when we shut down from above it shuts down last, so
+ %% Mirroring does not see it die.
+ %%
+ %% See comment in handle_info('DOWN', ...) below
+ {ok, {{one_for_all, 0, 1}, [Delegate, Mirroring]}};
+ ignore ->
+ ignore
+ end;
+
+init({delegate, Restart}) ->
+ {ok, {Restart, []}};
+
+init({mirroring, Group, ChildSpecs}) ->
+ {ok, #state{group = Group, initial_childspecs = ChildSpecs}}.
+
+handle_call({init, Overall}, _From,
+ State = #state{overall = undefined,
+ delegate = undefined,
+ group = Group,
+ initial_childspecs = ChildSpecs}) ->
+ process_flag(trap_exit, true),
+ ?PG2:create(Group),
+ ok = ?PG2:join(Group, self()),
+ Rest = ?PG2:get_members(Group) -- [self()],
+ case Rest of
+ [] -> {atomic, _} = mnesia:transaction(fun() -> delete_all(Group) end);
+ _ -> ok
+ end,
+ [begin
+ ?GEN_SERVER:cast(Pid, {ensure_monitoring, self()}),
+ erlang:monitor(process, Pid)
+ end || Pid <- Rest],
+ Delegate = child(Overall, delegate),
+ erlang:monitor(process, Delegate),
+ [maybe_start(Group, Delegate, S) || S <- ChildSpecs],
+ {reply, ok, State#state{overall = Overall, delegate = Delegate}};
+
+handle_call({start_child, ChildSpec}, _From,
+ State = #state{delegate = Delegate,
+ group = Group}) ->
+ {reply, maybe_start(Group, Delegate, ChildSpec), State};
+
+handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
+ group = Group}) ->
+ {reply, stop(Group, Delegate, Id), State};
+
+handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
+ {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
+
+handle_call(delegate_supervisor, _From, State = #state{delegate = Delegate}) ->
+ {reply, Delegate, State};
+
+handle_call(group, _From, State = #state{group = Group}) ->
+ {reply, Group, State};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast({ensure_monitoring, Pid}, State) ->
+ erlang:monitor(process, Pid),
+ {noreply, State};
+
+handle_cast({die, Reason}, State = #state{group = Group}) ->
+ tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{delegate = Pid, group = Group}) ->
+ %% Since the delegate is temporary, its death won't cause us to
+ %% die. Since the overall supervisor kills processes in reverse
+ %% order when shutting down "from above" and we started after the
+ %% delegate, if we see the delegate die then that means it died
+ %% "from below" i.e. due to the behaviour of its children, not
+ %% because the whole app was being torn down.
+ %%
+ %% Therefore if we get here we know we need to cause the entire
+ %% mirrored sup to shut down, not just fail over.
+ tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+handle_info({'DOWN', _Ref, process, Pid, _Reason},
+ State = #state{delegate = Delegate, group = Group}) ->
+ %% TODO load balance this
+ %% No guarantee pg2 will have received the DOWN before us.
+ Self = self(),
+ case lists:sort(?PG2:get_members(Group)) -- [Pid] of
+ [Self | _] -> {atomic, ChildSpecs} =
+ mnesia:transaction(fun() -> update_all(Pid) end),
+ [start(Delegate, ChildSpec) || ChildSpec <- ChildSpecs];
+ _ -> ok
+ end,
+ {noreply, State};
+
+handle_info(Info, State) ->
+ {stop, {unexpected_info, Info}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+tell_all_peers_to_die(Group, Reason) ->
+ [?GEN_SERVER:cast(P, {die, Reason}) ||
+ P <- ?PG2:get_members(Group) -- [self()]].
+
+maybe_start(Group, Delegate, ChildSpec) ->
+ case mnesia:transaction(fun() ->
+ check_start(Group, Delegate, ChildSpec)
+ end) of
+ {atomic, start} -> start(Delegate, ChildSpec);
+ {atomic, undefined} -> {error, already_present};
+ {atomic, Pid} -> {error, {already_started, Pid}};
+ %% If we are torn down while in the transaction...
+ {aborted, E} -> {error, E}
+ end.
+
+check_start(Group, Delegate, ChildSpec) ->
+ case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
+ [] -> write(Group, ChildSpec),
+ start;
+ [S] -> #mirrored_sup_childspec{key = {Group, Id},
+ mirroring_pid = Pid} = S,
+ case self() of
+ Pid -> child(Delegate, Id);
+ _ -> case supervisor(Pid) of
+ dead -> write(Group, ChildSpec),
+ start;
+ Delegate0 -> child(Delegate0, Id)
+ end
+ end
+ end.
+
+supervisor(Pid) ->
+ with_exit_handler(
+ fun() -> dead end,
+ fun() -> gen_server:call(Pid, delegate_supervisor, infinity) end).
+
+write(Group, ChildSpec) ->
+ ok = mnesia:write(
+ #mirrored_sup_childspec{key = {Group, id(ChildSpec)},
+ mirroring_pid = self(),
+ childspec = ChildSpec}),
+ ChildSpec.
+
+delete(Group, Id) ->
+ ok = mnesia:delete({?TABLE, {Group, Id}}).
+
+start(Delegate, ChildSpec) ->
+ apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
+
+stop(Group, Delegate, Id) ->
+ case mnesia:transaction(fun() -> check_stop(Group, Delegate, Id) end) of
+ {atomic, deleted} -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
+ {atomic, running} -> {error, running};
+ {aborted, E} -> {error, E}
+ end.
+
+check_stop(Group, Delegate, Id) ->
+ case child(Delegate, Id) of
+ undefined -> delete(Group, Id),
+ deleted;
+ _ -> running
+ end.
+
+id({Id, _, _, _, _, _}) -> Id.
+
+update_all(OldPid) ->
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = OldPid,
+ key = '$1',
+ childspec = '$2',
+ _ = '_'},
+ [write(Group, C) ||
+ [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
+
+delete_all(Group) ->
+ MatchHead = #mirrored_sup_childspec{key = {Group, '_'},
+ childspec = '$1',
+ _ = '_'},
+ [delete(Group, id(C)) ||
+ C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
+
+%%----------------------------------------------------------------------------
+
+create_tables() ->
+ create_tables([?TABLE_DEF]).
+
+create_tables([]) ->
+ ok;
+create_tables([{Table, Attributes} | Ts]) ->
+ case mnesia:create_table(Table, Attributes) of
+ {atomic, ok} -> create_tables(Ts);
+ {aborted, {already_exists, ?TABLE}} -> create_tables(Ts);
+ Err -> Err
+ end.
+
+table_definitions() ->
+ {Name, Attributes} = ?TABLE_DEF,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+%%----------------------------------------------------------------------------
+
+with_exit_handler(Handler, Thunk) ->
+ try
+ Thunk()
+ catch
+ exit:{R, _} when R =:= noproc; R =:= nodedown;
+ R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end.
+
+add_proplists(P1, P2) ->
+ add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
+add_proplists([], P2, Acc) -> P2 ++ Acc;
+add_proplists(P1, [], Acc) -> P1 ++ Acc;
+add_proplists([{K, V1} | P1], [{K, V2} | P2], Acc) ->
+ add_proplists(P1, P2, [{K, V1 + V2} | Acc]);
+add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
+ add_proplists(P1, P2, [KV | Acc]);
+add_proplists(P1, [KV | P2], Acc) ->
+ add_proplists(P1, P2, [KV | Acc]).
diff --git a/src/mirrored_supervisor_tests.erl b/src/mirrored_supervisor_tests.erl
new file mode 100644
index 00000000..ee9c7593
--- /dev/null
+++ b/src/mirrored_supervisor_tests.erl
@@ -0,0 +1,309 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2011 VMware, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor_tests).
+
+-compile([export_all]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-behaviour(gen_server).
+-behaviour(mirrored_supervisor).
+
+-define(MS, mirrored_supervisor).
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+all_tests() ->
+ passed = test_migrate(),
+ passed = test_migrate_twice(),
+ passed = test_already_there(),
+ passed = test_delete_restart(),
+ passed = test_which_children(),
+ passed = test_large_group(),
+ passed = test_childspecs_at_init(),
+ passed = test_anonymous_supervisors(),
+ passed = test_no_migration_on_shutdown(),
+ passed = test_start_idempotence(),
+ passed = test_unsupported(),
+ passed = test_ignore(),
+ passed.
+
+%% Simplest test
+test_migrate() ->
+ with_sups(fun([A, _]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b]).
+
+%% Is migration transitive?
+test_migrate_twice() ->
+ with_sups(fun([A, B]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill(A, Pid1),
+ {ok, C} = start_sup(c),
+ Pid2 = pid_of(worker),
+ kill(B, Pid2),
+ Pid3 = pid_of(worker),
+ false = (Pid1 =:= Pid3),
+ kill(C)
+ end, [a, b]).
+
+%% Can't start the same child twice
+test_already_there() ->
+ with_sups(fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, S),
+ {error, {already_started, Pid}} = ?MS:start_child(b, S)
+ end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+test_delete_restart() ->
+ with_sups(fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid1} = ?MS:start_child(a, S),
+ {error, running} = ?MS:delete_child(a, worker),
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid2} = ?MS:start_child(b, S),
+ false = (Pid1 =:= Pid2),
+ ok = ?MS:terminate_child(b, worker),
+ {ok, Pid3} = ?MS:restart_child(b, worker),
+ Pid3 = pid_of(worker),
+ false = (Pid2 =:= Pid3),
+ %% Not the same supervisor as the worker is on
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid4} = ?MS:start_child(a, S),
+ false = (Pid3 =:= Pid4)
+ end, [a, b]).
+
+test_which_children() ->
+ with_sups(
+ fun([A, B] = Both) ->
+ ?MS:start_child(A, childspec(worker)),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ok = ?MS:terminate_child(a, worker),
+ assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+ {ok, _} = ?MS:restart_child(a, worker),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ?MS:start_child(B, childspec(worker2)),
+ assert_wc(Both, fun (C) -> 2 = length(C) end)
+ end, [a, b]).
+
+assert_wc(Sups, Fun) ->
+ [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+wc_pid(Child) ->
+ {worker, Pid, worker, [mirrored_supervisor_tests]} = Child,
+ Pid.
+
+%% Not all the members of the group should actually do the failover
+test_large_group() ->
+ with_sups(fun([A, _, _, _]) ->
+ ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+test_childspecs_at_init() ->
+ S = childspec(worker),
+ with_sups(fun([A, _]) ->
+ Pid1 = pid_of(worker),
+ kill(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [{a, [S]}, {b, [S]}]).
+
+test_anonymous_supervisors() ->
+ with_sups(fun([A, _B]) ->
+ ?MS:start_child(A, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive, rather the whole group should go away.
+test_no_migration_on_shutdown() ->
+ with_sups(fun([Evil, _]) ->
+ ?MS:start_child(Evil, childspec(worker)),
+ try
+ call(worker, ping),
+ exit(worker_should_not_have_migrated)
+ catch exit:{timeout_waiting_for_server, _} ->
+ ok
+ end
+ end, [evil, good]).
+
+test_start_idempotence() ->
+ with_sups(fun([_]) ->
+ CS = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, CS),
+ {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+ ?MS:terminate_child(a, worker),
+ {error, already_present} = ?MS:start_child(a, CS)
+ end, [a]).
+
+test_unsupported() ->
+ try
+ ?MS:start_link({global, foo}, get_group(group), ?MODULE,
+ {sup, one_for_one, []}),
+ exit(no_global)
+ catch error:badarg ->
+ ok
+ end,
+ try
+ ?MS:start_link({local, foo}, get_group(group), ?MODULE,
+ {sup, simple_one_for_one, []}),
+ exit(no_sofo)
+ catch error:badarg ->
+ ok
+ end,
+ passed.
+
+%% Just test we don't blow up
+test_ignore() ->
+ ?MS:start_link({local, foo}, get_group(group), ?MODULE,
+ {sup, fake_strategy_for_ignore, []}),
+ passed.
+
+%% ---------------------------------------------------------------------------
+
+with_sups(Fun, Sups) ->
+ inc_group(),
+ Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+ Fun(Pids),
+ [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+ passed.
+
+start_sup(Spec) ->
+ start_sup(Spec, group).
+
+start_sup({Name, ChildSpecs}, Group) ->
+ {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+ %% We are not a supervisor, so when we kill the supervisor we do
+ %% not want to die!
+ unlink(Pid),
+ {ok, Pid};
+
+start_sup(Name, Group) ->
+ start_sup({Name, []}, Group).
+
+start_sup0(anon, Group, ChildSpecs) ->
+ ?MS:start_link(Group, ?MODULE, {sup, one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+ ?MS:start_link({local, Name}, Group, ?MODULE,
+ {sup, one_for_one, ChildSpecs}).
+
+childspec(Id) ->
+ {Id, {?MODULE, start_gs, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
+
+start_gs(Id) ->
+ gen_server:start_link({local, Id}, ?MODULE, server, []).
+
+pid_of(Id) ->
+ {received, Pid, ping} = call(Id, ping),
+ Pid.
+
+inc_group() ->
+ Count = case get(counter) of
+ undefined -> 0;
+ C -> C
+ end + 1,
+ put(counter, Count).
+
+get_group(Group) ->
+ {Group, get(counter)}.
+
+call(Id, Msg) -> call(Id, Msg, 100, 10).
+
+call(Id, Msg, 0, _Decr) ->
+ exit({timeout_waiting_for_server, {Id, Msg}});
+
+call(Id, Msg, MaxDelay, Decr) ->
+ try
+ gen_server:call(Id, Msg, infinity)
+ catch exit:_ -> timer:sleep(Decr),
+ call(Id, Msg, MaxDelay - Decr, Decr)
+ end.
+
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+ erlang:monitor(process, Pid),
+ [erlang:monitor(process, P) || P <- Waits],
+ exit(Pid, kill),
+ kill_wait(Pid),
+ [kill_wait(P) || P <- Waits].
+
+kill_wait(Pid) ->
+ receive
+ {'DOWN', _Ref, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Dumb gen_server we can supervise
+%% ---------------------------------------------------------------------------
+
+init({sup, fake_strategy_for_ignore, _ChildSpecs}) ->
+ ignore;
+
+init({sup, Strategy, ChildSpecs}) ->
+ {ok, {{Strategy, 0, 1}, ChildSpecs}};
+
+init(server) ->
+ {ok, state}.
+
+handle_call(Msg, _From, State) ->
+ die_if_my_supervisor_is_evil(),
+ {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+die_if_my_supervisor_is_evil() ->
+ try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+ false -> ok;
+ _ -> exit(doooom)
+ catch
+ exit:{noproc, _} -> ok
+ end.
diff --git a/src/pg2_fixed.erl b/src/pg2_fixed.erl
new file mode 100644
index 00000000..8926b83b
--- /dev/null
+++ b/src/pg2_fixed.erl
@@ -0,0 +1,400 @@
+%% This is the version of pg2 from R14B02, which contains the fix
+%% described at
+%% http://erlang.2086793.n4.nabble.com/pg2-still-busted-in-R13B04-td2230601.html.
+%% The changes are a search-and-replace to rename the module and avoid
+%% clashes with other versions of pg2, and also a simple rewrite of
+%% "andalso" and "orelse" expressions to case statements where the second
+%% operand is not a boolean since R12B does not allow this.
+
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg2_fixed).
+
+-export([create/1, delete/1, join/2, leave/2]).
+-export([get_members/1, get_local_members/1]).
+-export([get_closest_pid/1, which_groups/0]).
+-export([start/0,start_link/0,init/1,handle_call/3,handle_cast/2,handle_info/2,
+ terminate/2]).
+
+%%% As of R13B03 monitors are used instead of links.
+
+%%%
+%%% Exported functions
+%%%
+
+-spec start_link() -> {'ok', pid()} | {'error', term()}.
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-spec start() -> {'ok', pid()} | {'error', term()}.
+
+start() ->
+ ensure_started().
+
+-spec create(term()) -> 'ok'.
+
+create(Name) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ false ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE, {create, Name})
+ end),
+ ok;
+ true ->
+ ok
+ end.
+
+-type name() :: term().
+
+-spec delete(name()) -> 'ok'.
+
+delete(Name) ->
+ ensure_started(),
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE, {delete, Name})
+ end),
+ ok.
+
+-spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}.
+
+join(Name, Pid) when is_pid(Pid) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ false ->
+ {error, {no_such_group, Name}};
+ true ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE,
+ {join, Name, Pid})
+ end),
+ ok
+ end.
+
+-spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}.
+
+leave(Name, Pid) when is_pid(Pid) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ false ->
+ {error, {no_such_group, Name}};
+ true ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE,
+ {leave, Name, Pid})
+ end),
+ ok
+ end.
+
+-type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}.
+
+-spec get_members(name()) -> get_members_ret().
+
+get_members(Name) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ true ->
+ group_members(Name);
+ false ->
+ {error, {no_such_group, Name}}
+ end.
+
+-spec get_local_members(name()) -> get_members_ret().
+
+get_local_members(Name) ->
+ ensure_started(),
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ true ->
+ local_group_members(Name);
+ false ->
+ {error, {no_such_group, Name}}
+ end.
+
+-spec which_groups() -> [name()].
+
+which_groups() ->
+ ensure_started(),
+ all_groups().
+
+-type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}.
+
+-spec get_closest_pid(term()) -> pid() | {'error', gcp_error_reason()}.
+
+get_closest_pid(Name) ->
+ case get_local_members(Name) of
+ [Pid] ->
+ Pid;
+ [] ->
+ {_,_,X} = erlang:now(),
+ case get_members(Name) of
+ [] -> {error, {no_process, Name}};
+ Members ->
+ lists:nth((X rem length(Members))+1, Members)
+ end;
+ Members when is_list(Members) ->
+ {_,_,X} = erlang:now(),
+ lists:nth((X rem length(Members))+1, Members);
+ Else ->
+ Else
+ end.
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
+-record(state, {}).
+
+-spec init([]) -> {'ok', #state{}}.
+
+init([]) ->
+ Ns = nodes(),
+ net_kernel:monitor_nodes(true),
+ lists:foreach(fun(N) ->
+ {?MODULE, N} ! {new_pg2_fixed, node()},
+ self() ! {nodeup, N}
+ end, Ns),
+ pg2_fixed_table = ets:new(pg2_fixed_table, [ordered_set, protected, named_table]),
+ {ok, #state{}}.
+
+-type call() :: {'create', name()}
+ | {'delete', name()}
+ | {'join', name(), pid()}
+ | {'leave', name(), pid()}.
+
+-spec handle_call(call(), _, #state{}) ->
+ {'reply', 'ok', #state{}}.
+
+handle_call({create, Name}, _From, S) ->
+ assure_group(Name),
+ {reply, ok, S};
+handle_call({join, Name, Pid}, _From, S) ->
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ true -> join_group(Name, Pid);
+ _ -> ok
+ end,
+ {reply, ok, S};
+handle_call({leave, Name, Pid}, _From, S) ->
+ case ets:member(pg2_fixed_table, {group, Name}) of
+ true -> leave_group(Name, Pid);
+ _ -> ok
+ end,
+ {reply, ok, S};
+handle_call({delete, Name}, _From, S) ->
+ delete_group(Name),
+ {reply, ok, S};
+handle_call(Request, From, S) ->
+ error_logger:warning_msg("The pg2_fixed server received an unexpected message:\n"
+ "handle_call(~p, ~p, _)\n",
+ [Request, From]),
+ {noreply, S}.
+
+-type all_members() :: [[name(),...]].
+-type cast() :: {'exchange', node(), all_members()}
+ | {'del_member', name(), pid()}.
+
+-spec handle_cast(cast(), #state{}) -> {'noreply', #state{}}.
+
+handle_cast({exchange, _Node, List}, S) ->
+ store(List),
+ {noreply, S};
+handle_cast(_, S) ->
+ %% Ignore {del_member, Name, Pid}.
+ {noreply, S}.
+
+-spec handle_info(tuple(), #state{}) -> {'noreply', #state{}}.
+
+handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
+ member_died(MonitorRef),
+ {noreply, S};
+handle_info({nodeup, Node}, S) ->
+ gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
+ {noreply, S};
+handle_info({new_pg2_fixed, Node}, S) ->
+ gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
+ {noreply, S};
+handle_info(_, S) ->
+ {noreply, S}.
+
+-spec terminate(term(), #state{}) -> 'ok'.
+
+terminate(_Reason, _S) ->
+ true = ets:delete(pg2_fixed_table),
+ ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg2_fixed_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{group, Name}}
+%%% Process group Name.
+%%% {{ref, Pid}, RPid, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Sometimes a process is spawned to
+%%% monitor the pid (RPid). Counter is incremented when the Pid joins
+%%% some group.
+%%% {{member, Name, Pid}, GroupCounter}
+%%% {{local_member, Name, Pid}}
+%%% Pid is a member of group Name, GroupCounter is incremented when the
+%%% Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
+
+store(List) ->
+ _ = [case assure_group(Name) of
+ true ->
+ [join_group(Name, P) || P <- Members -- group_members(Name)];
+ _ ->
+ ok
+ end || [Name, Members] <- List],
+ ok.
+
+assure_group(Name) ->
+ Key = {group, Name},
+ ets:member(pg2_fixed_table, Key) orelse true =:= ets:insert(pg2_fixed_table, {Key}).
+
+delete_group(Name) ->
+ _ = [leave_group(Name, Pid) || Pid <- group_members(Name)],
+ true = ets:delete(pg2_fixed_table, {group, Name}),
+ ok.
+
+member_died(Ref) ->
+ [{{ref, Ref}, Pid}] = ets:lookup(pg2_fixed_table, {ref, Ref}),
+ Names = member_groups(Pid),
+ _ = [leave_group(Name, P) ||
+ Name <- Names,
+ P <- member_in_group(Pid, Name)],
+ %% Kept for backward compatibility with links. Can be removed, eventually.
+ _ = [gen_server:abcast(nodes(), ?MODULE, {del_member, Name, Pid}) ||
+ Name <- Names],
+ ok.
+
+join_group(Name, Pid) ->
+ Ref_Pid = {ref, Pid},
+ try _ = ets:update_counter(pg2_fixed_table, Ref_Pid, {4, +1})
+ catch _:_ ->
+ {RPid, Ref} = do_monitor(Pid),
+ true = ets:insert(pg2_fixed_table, {Ref_Pid, RPid, Ref, 1}),
+ true = ets:insert(pg2_fixed_table, {{ref, Ref}, Pid})
+ end,
+ Member_Name_Pid = {member, Name, Pid},
+ try _ = ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, +1, 1, 1})
+ catch _:_ ->
+ true = ets:insert(pg2_fixed_table, {Member_Name_Pid, 1}),
+ _ = [ets:insert(pg2_fixed_table, {{local_member, Name, Pid}}) ||
+ node(Pid) =:= node()],
+ true = ets:insert(pg2_fixed_table, {{pid, Pid, Name}})
+ end.
+
+leave_group(Name, Pid) ->
+ Member_Name_Pid = {member, Name, Pid},
+ try ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, -1, 0, 0}) of
+ N ->
+ if
+ N =:= 0 ->
+ true = ets:delete(pg2_fixed_table, {pid, Pid, Name}),
+ _ = [ets:delete(pg2_fixed_table, {local_member, Name, Pid}) ||
+ node(Pid) =:= node()],
+ true = ets:delete(pg2_fixed_table, Member_Name_Pid);
+ true ->
+ ok
+ end,
+ Ref_Pid = {ref, Pid},
+ case ets:update_counter(pg2_fixed_table, Ref_Pid, {4, -1}) of
+ 0 ->
+ [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_fixed_table, Ref_Pid),
+ true = ets:delete(pg2_fixed_table, {ref, Ref}),
+ true = ets:delete(pg2_fixed_table, Ref_Pid),
+ true = erlang:demonitor(Ref, [flush]),
+ kill_monitor_proc(RPid, Pid);
+ _ ->
+ ok
+ end
+ catch _:_ ->
+ ok
+ end.
+
+all_members() ->
+ [[G, group_members(G)] || G <- all_groups()].
+
+group_members(Name) ->
+ [P ||
+ [P, N] <- ets:match(pg2_fixed_table, {{member, Name, '$1'},'$2'}),
+ _ <- lists:seq(1, N)].
+
+local_group_members(Name) ->
+ [P ||
+ [Pid] <- ets:match(pg2_fixed_table, {{local_member, Name, '$1'}}),
+ P <- member_in_group(Pid, Name)].
+
+member_in_group(Pid, Name) ->
+ case ets:lookup(pg2_fixed_table, {member, Name, Pid}) of
+ [] -> [];
+ [{{member, Name, Pid}, N}] ->
+ lists:duplicate(N, Pid)
+ end.
+
+member_groups(Pid) ->
+ [Name || [Name] <- ets:match(pg2_fixed_table, {{pid, Pid, '$1'}})].
+
+all_groups() ->
+ [N || [N] <- ets:match(pg2_fixed_table, {{group,'$1'}})].
+
+ensure_started() ->
+ case whereis(?MODULE) of
+ undefined ->
+ C = {pg2_fixed, {?MODULE, start_link, []}, permanent,
+ 1000, worker, [?MODULE]},
+ supervisor:start_child(kernel_safe_sup, C);
+ Pg2_FixedPid ->
+ {ok, Pg2_FixedPid}
+ end.
+
+
+kill_monitor_proc(RPid, Pid) ->
+ case RPid of
+ Pid -> ok;
+ _ -> exit(RPid, kill)
+ end.
+
+%% When/if erlang:monitor() returns before trying to connect to the
+%% other node this function can be removed.
+do_monitor(Pid) ->
+ case (node(Pid) =:= node()) orelse lists:member(node(Pid), nodes()) of
+ true ->
+ %% Assume the node is still up
+ {Pid, erlang:monitor(process, Pid)};
+ false ->
+ F = fun() ->
+ Ref = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Ref, process, Pid, _Info} ->
+ exit(normal)
+ end
+ end,
+ erlang:spawn_monitor(F)
+ end.
diff --git a/src/priority_queue.erl b/src/priority_queue.erl
index 4a94b24b..4fc8b469 100644
--- a/src/priority_queue.erl
+++ b/src/priority_queue.erl
@@ -47,7 +47,10 @@
-ifdef(use_specs).
--type(priority() :: integer()).
+-export_type([q/0]).
+
+-type(q() :: pqueue()).
+-type(priority() :: integer() | 'infinity').
-type(squeue() :: {queue, [any()], [any()]}).
-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
@@ -71,8 +74,9 @@ new() ->
is_queue({queue, R, F}) when is_list(R), is_list(F) ->
true;
is_queue({pqueue, Queues}) when is_list(Queues) ->
- lists:all(fun ({P, Q}) -> is_integer(P) andalso is_queue(Q) end,
- Queues);
+ lists:all(fun ({infinity, Q}) -> is_queue(Q);
+ ({P, Q}) -> is_integer(P) andalso is_queue(Q)
+ end, Queues);
is_queue(_) ->
false.
@@ -89,7 +93,8 @@ len({pqueue, Queues}) ->
to_list({queue, In, Out}) when is_list(In), is_list(Out) ->
[{0, V} || V <- Out ++ lists:reverse(In, [])];
to_list({pqueue, Queues}) ->
- [{-P, V} || {P, Q} <- Queues, {0, V} <- to_list(Q)].
+ [{maybe_negate_priority(P), V} || {P, Q} <- Queues,
+ {0, V} <- to_list(Q)].
in(Item, Q) ->
in(Item, 0, Q).
@@ -103,12 +108,20 @@ in(X, Priority, _Q = {queue, [], []}) ->
in(X, Priority, Q = {queue, _, _}) ->
in(X, Priority, {pqueue, [{0, Q}]});
in(X, Priority, {pqueue, Queues}) ->
- P = -Priority,
+ P = maybe_negate_priority(Priority),
{pqueue, case lists:keysearch(P, 1, Queues) of
{value, {_, Q}} ->
lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
+ false when P == infinity ->
+ [{P, {queue, [X], []}} | Queues];
false ->
- lists:keysort(1, [{P, {queue, [X], []}} | Queues])
+ case Queues of
+ [{infinity, InfQueue} | Queues1] ->
+ [{infinity, InfQueue} |
+ lists:keysort(1, [{P, {queue, [X], []}} | Queues1])];
+ _ ->
+ lists:keysort(1, [{P, {queue, [X], []}} | Queues])
+ end
end}.
out({queue, [], []} = Q) ->
@@ -141,7 +154,8 @@ join({queue, [], []}, B) ->
join({queue, AIn, AOut}, {queue, BIn, BOut}) ->
{queue, BIn, AOut ++ lists:reverse(AIn, BOut)};
join(A = {queue, _, _}, {pqueue, BPQ}) ->
- {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, BPQ),
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
Post1 = case Post of
[] -> [ {0, A} ];
[ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
@@ -149,7 +163,8 @@ join(A = {queue, _, _}, {pqueue, BPQ}) ->
end,
{pqueue, Pre ++ Post1};
join({pqueue, APQ}, B = {queue, _, _}) ->
- {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, APQ),
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
Post1 = case Post of
[] -> [ {0, B} ];
[ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
@@ -165,7 +180,7 @@ merge(APQ, [], Acc) ->
lists:reverse(Acc, APQ);
merge([{P, A}|As], [{P, B}|Bs], Acc) ->
merge(As, Bs, [ {P, join(A, B)} | Acc ]);
-merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB ->
+merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
merge(As, Bs, [ {PA, A} | Acc ]);
merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
merge(As, Bs, [ {PB, B} | Acc ]).
@@ -174,3 +189,6 @@ r2f([]) -> {queue, [], []};
r2f([_] = R) -> {queue, [], R};
r2f([X,Y]) -> {queue, [X], [Y]};
r2f([X,Y|R]) -> {queue, [X,Y], lists:reverse(R, [])}.
+
+maybe_negate_priority(infinity) -> infinity;
+maybe_negate_priority(P) -> -P.
diff --git a/src/rabbit.erl b/src/rabbit.erl
index e067607d..47bc4433 100644
--- a/src/rabbit.erl
+++ b/src/rabbit.erl
@@ -18,8 +18,9 @@
-behaviour(application).
--export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0, environment/0,
- rotate_logs/1]).
+-export([prepare/0, start/0, stop/0, stop_and_halt/0, status/0,
+ is_running/0 , is_running/1, environment/0,
+ rotate_logs/1, force_event_refresh/0]).
-export([start/2, stop/1]).
@@ -187,14 +188,17 @@
-spec(prepare/0 :: () -> 'ok').
-spec(start/0 :: () -> 'ok').
-spec(stop/0 :: () -> 'ok').
--spec(stop_and_halt/0 :: () -> 'ok').
+-spec(stop_and_halt/0 :: () -> no_return()).
-spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())).
+-spec(force_event_refresh/0 :: () -> 'ok').
-spec(status/0 ::
() -> [{pid, integer()} |
{running_applications, [{atom(), string(), string()}]} |
{os, {atom(), atom()}} |
{erlang_version, string()} |
{memory, any()}]).
+-spec(is_running/0 :: () -> boolean()).
+-spec(is_running/1 :: (node()) -> boolean()).
-spec(environment/0 :: () -> [{atom() | term()}]).
-spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()).
@@ -202,6 +206,14 @@
-spec(boot_delegate/0 :: () -> 'ok').
-spec(recover/0 :: () -> 'ok').
+-spec(start/2 :: ('normal',[]) ->
+ {'error',
+ {'erlang_version_too_old',
+ {'found',[any()]},
+ {'required',[any(),...]}}} |
+ {'ok',pid()}).
+-spec(stop/1 :: (_) -> 'ok').
+
-endif.
%%----------------------------------------------------------------------------
@@ -220,23 +232,33 @@ start() ->
end.
stop() ->
+ rabbit_log:info("Stopping Rabbit~n"),
ok = rabbit_misc:stop_applications(application_load_order()).
stop_and_halt() ->
try
stop()
after
+ rabbit_misc:local_info_msg("Halting Erlang VM~n", []),
init:stop()
end,
ok.
status() ->
[{pid, list_to_integer(os:getpid())},
- {running_applications, application:which_applications()},
+ {running_applications, application:which_applications(infinity)},
{os, os:type()},
{erlang_version, erlang:system_info(system_version)},
{memory, erlang:memory()}].
+is_running() -> is_running(node()).
+
+is_running(Node) ->
+ case rpc:call(Node, application, which_applications, [infinity]) of
+ {badrpc, _} -> false;
+ Apps -> proplists:is_defined(rabbit, Apps)
+ end.
+
environment() ->
lists:keysort(
1, [P || P = {K, _} <- application:get_all_env(rabbit),
@@ -244,6 +266,7 @@ environment() ->
rotate_logs(BinarySuffix) ->
Suffix = binary_to_list(BinarySuffix),
+ rabbit_misc:local_info_msg("Rotating logs with suffix '~s'~n", [Suffix]),
log_rotation_result(rotate_logs(log_location(kernel),
Suffix,
rabbit_error_logger_file_h),
@@ -441,20 +464,20 @@ insert_default_data() ->
ensure_working_log_handlers() ->
Handlers = gen_event:which_handlers(error_logger),
- ok = ensure_working_log_handler(error_logger_file_h,
+ ok = ensure_working_log_handler(error_logger_tty_h,
rabbit_error_logger_file_h,
error_logger_tty_h,
log_location(kernel),
Handlers),
- ok = ensure_working_log_handler(sasl_report_file_h,
+ ok = ensure_working_log_handler(sasl_report_tty_h,
rabbit_sasl_report_file_h,
sasl_report_tty_h,
log_location(sasl),
Handlers),
ok.
-ensure_working_log_handler(OldFHandler, NewFHandler, TTYHandler,
+ensure_working_log_handler(OldHandler, NewHandler, TTYHandler,
LogLocation, Handlers) ->
case LogLocation of
undefined -> ok;
@@ -464,10 +487,10 @@ ensure_working_log_handler(OldFHandler, NewFHandler, TTYHandler,
throw({error, {cannot_log_to_tty,
TTYHandler, not_installed}})
end;
- _ -> case lists:member(NewFHandler, Handlers) of
+ _ -> case lists:member(NewHandler, Handlers) of
true -> ok;
false -> case rotate_logs(LogLocation, "",
- OldFHandler, NewFHandler) of
+ OldHandler, NewHandler) of
ok -> ok;
{error, Reason} ->
throw({error, {cannot_log_to_file,
@@ -477,10 +500,10 @@ ensure_working_log_handler(OldFHandler, NewFHandler, TTYHandler,
end.
log_location(Type) ->
- case application:get_env(Type, case Type of
- kernel -> error_logger;
- sasl -> sasl_error_logger
- end) of
+ case application:get_env(rabbit, case Type of
+ kernel -> error_logger;
+ sasl -> sasl_error_logger
+ end) of
{ok, {file, File}} -> File;
{ok, false} -> undefined;
{ok, tty} -> tty;
@@ -512,6 +535,12 @@ log_rotation_result(ok, {error, SaslLogError}) ->
log_rotation_result(ok, ok) ->
ok.
+force_event_refresh() ->
+ rabbit_direct:force_event_refresh(),
+ rabbit_networking:force_connection_event_refresh(),
+ rabbit_channel:force_event_refresh(),
+ rabbit_amqqueue:force_event_refresh().
+
%%---------------------------------------------------------------------------
%% misc
diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl
index c0ae18c0..ca28d686 100644
--- a/src/rabbit_access_control.erl
+++ b/src/rabbit_access_control.erl
@@ -32,6 +32,9 @@
-spec(check_user_pass_login/2 ::
(rabbit_types:username(), rabbit_types:password())
-> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
+-spec(check_user_login/2 ::
+ (rabbit_types:username(), [{atom(), any()}])
+ -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
-spec(check_vhost_access/2 ::
(rabbit_types:user(), rabbit_types:vhost())
-> 'ok' | rabbit_types:channel_exit()).
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
index e9d01d12..b3e92b69 100644
--- a/src/rabbit_amqqueue.erl
+++ b/src/rabbit_amqqueue.erl
@@ -21,7 +21,8 @@
-export([lookup/1, with/2, with_or_die/2, assert_equivalence/5,
check_exclusive_access/2, with_exclusive_access_or_die/3,
stat/1, deliver/2, requeue/3, ack/3, reject/4]).
--export([list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+-export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+-export([force_event_refresh/0]).
-export([consumers/1, consumers_all/1, consumer_info_keys/0]).
-export([basic_get/3, basic_consume/7, basic_cancel/4]).
-export([notify_sent/2, unblock/2, flush_all/2]).
@@ -32,9 +33,7 @@
%% internal
-export([internal_declare/2, internal_delete/1, run_backing_queue/3,
- sync_timeout/1, update_ram_duration/1, set_ram_duration_target/2,
- set_maximum_since_use/2, maybe_expire/1, drop_expired/1,
- emit_stats/1]).
+ set_ram_duration_target/2, set_maximum_since_use/2]).
-include("rabbit.hrl").
-include_lib("stdlib/include/qlc.hrl").
@@ -50,7 +49,7 @@
-type(name() :: rabbit_types:r('queue')).
-type(qlen() :: rabbit_types:ok(non_neg_integer())).
--type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A)).
+-type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return())).
-type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}).
-type(msg_id() :: non_neg_integer()).
-type(ok_or_errors() ::
@@ -65,6 +64,9 @@
rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
-> {'new' | 'existing', rabbit_types:amqqueue()} |
rabbit_types:channel_exit()).
+-spec(internal_declare/2 ::
+ (rabbit_types:amqqueue(), boolean())
+ -> queue_or_not_found() | rabbit_misc:thunk(queue_or_not_found())).
-spec(lookup/1 ::
(name()) -> rabbit_types:ok(rabbit_types:amqqueue()) |
rabbit_types:error('not_found')).
@@ -81,6 +83,7 @@
-> 'ok' | rabbit_types:channel_exit()).
-spec(with_exclusive_access_or_die/3 ::
(name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
+-spec(list/0 :: () -> [rabbit_types:amqqueue()]).
-spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
@@ -90,6 +93,7 @@
-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
-spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
-> [rabbit_types:infos()]).
+-spec(force_event_refresh/0 :: () -> 'ok').
-spec(consumers/1 ::
(rabbit_types:amqqueue())
-> [{pid(), rabbit_types:ctag(), boolean()}]).
@@ -100,7 +104,6 @@
-spec(stat/1 ::
(rabbit_types:amqqueue())
-> {'ok', non_neg_integer(), non_neg_integer()}).
--spec(emit_stats/1 :: (rabbit_types:amqqueue()) -> 'ok').
-spec(delete_immediately/1 :: (rabbit_types:amqqueue()) -> 'ok').
-spec(delete/3 ::
(rabbit_types:amqqueue(), 'false', 'false')
@@ -119,21 +122,19 @@
-spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
-spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
-spec(notify_down_all/2 :: ([pid()], pid()) -> ok_or_errors()).
--spec(limit_all/3 :: ([pid()], pid(), pid() | 'undefined') -> ok_or_errors()).
+-spec(limit_all/3 :: ([pid()], pid(), rabbit_limiter:token()) ->
+ ok_or_errors()).
-spec(basic_get/3 :: (rabbit_types:amqqueue(), pid(), boolean()) ->
{'ok', non_neg_integer(), qmsg()} | 'empty').
-spec(basic_consume/7 ::
- (rabbit_types:amqqueue(), boolean(), pid(), pid() | 'undefined',
- rabbit_types:ctag(), boolean(), any())
+ (rabbit_types:amqqueue(), boolean(), pid(),
+ rabbit_limiter:token(), rabbit_types:ctag(), boolean(), any())
-> rabbit_types:ok_or_error('exclusive_consume_unavailable')).
-spec(basic_cancel/4 ::
(rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok').
-spec(notify_sent/2 :: (pid(), pid()) -> 'ok').
-spec(unblock/2 :: (pid(), pid()) -> 'ok').
-spec(flush_all/2 :: ([pid()], pid()) -> 'ok').
--spec(internal_declare/2 ::
- (rabbit_types:amqqueue(), boolean())
- -> queue_or_not_found() | rabbit_misc:thunk(queue_or_not_found())).
-spec(internal_delete/1 ::
(name()) -> rabbit_types:ok_or_error('not_found') |
rabbit_types:connection_exit() |
@@ -142,13 +143,11 @@
-spec(run_backing_queue/3 ::
(pid(), atom(),
(fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
--spec(sync_timeout/1 :: (pid()) -> 'ok').
--spec(update_ram_duration/1 :: (pid()) -> 'ok').
-spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
-spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(maybe_expire/1 :: (pid()) -> 'ok').
-spec(on_node_down/1 :: (node()) -> 'ok').
-spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
+-spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
-endif.
@@ -230,7 +229,7 @@ internal_declare(Q = #amqqueue{name = QueueName}, false) ->
end).
store_queue(Q = #amqqueue{durable = true}) ->
- ok = mnesia:write(rabbit_durable_queue, Q, write),
+ ok = mnesia:write(rabbit_durable_queue, Q#amqqueue{slave_pids = []}, write),
ok = mnesia:write(rabbit_queue, Q, write),
ok;
store_queue(Q = #amqqueue{durable = false}) ->
@@ -322,7 +321,7 @@ check_declare_arguments(QueueName, Args) ->
ok -> ok;
{error, Error} -> rabbit_misc:protocol_error(
precondition_failed,
- "invalid arg '~s' for ~s: ~w",
+ "invalid arg '~s' for ~s: ~255p",
[Key, rabbit_misc:rs(QueueName), Error])
end || {Key, Fun} <-
[{<<"x-expires">>, fun check_integer_argument/2},
@@ -365,6 +364,9 @@ check_ha_policy_argument({longstr, Policy}, _Args) ->
check_ha_policy_argument({Type, _}, _Args) ->
{error, {unacceptable_type, Type}}.
+list() ->
+ mnesia:dirty_match_object(rabbit_queue, #amqqueue{_ = '_'}).
+
list(VHostPath) ->
mnesia:dirty_match_object(
rabbit_queue,
@@ -387,6 +389,10 @@ info_all(VHostPath) -> map(VHostPath, fun (Q) -> info(Q) end).
info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end).
+force_event_refresh() ->
+ [gen_server2:cast(Q#amqqueue.pid, force_event_refresh) || Q <- list()],
+ ok.
+
consumers(#amqqueue{ pid = QPid }) ->
delegate_call(QPid, consumers).
@@ -405,9 +411,6 @@ consumers_all(VHostPath) ->
stat(#amqqueue{pid = QPid}) ->
delegate_call(QPid, stat).
-emit_stats(#amqqueue{pid = QPid}) ->
- delegate_cast(QPid, emit_stats).
-
delete_immediately(#amqqueue{ pid = QPid }) ->
gen_server2:cast(QPid, delete_immediately).
@@ -439,19 +442,17 @@ notify_down_all(QPids, ChPid) ->
fun (QPid) -> gen_server2:call(QPid, {notify_down, ChPid}, infinity) end,
QPids).
-limit_all(QPids, ChPid, LimiterPid) ->
+limit_all(QPids, ChPid, Limiter) ->
delegate:invoke_no_result(
- QPids, fun (QPid) ->
- gen_server2:cast(QPid, {limit, ChPid, LimiterPid})
- end).
+ QPids, fun (QPid) -> gen_server2:cast(QPid, {limit, ChPid, Limiter}) end).
basic_get(#amqqueue{pid = QPid}, ChPid, NoAck) ->
delegate_call(QPid, {basic_get, ChPid, NoAck}).
-basic_consume(#amqqueue{pid = QPid}, NoAck, ChPid, LimiterPid,
+basic_consume(#amqqueue{pid = QPid}, NoAck, ChPid, Limiter,
ConsumerTag, ExclusiveConsume, OkMsg) ->
delegate_call(QPid, {basic_consume, NoAck, ChPid,
- LimiterPid, ConsumerTag, ExclusiveConsume, OkMsg}).
+ Limiter, ConsumerTag, ExclusiveConsume, OkMsg}).
basic_cancel(#amqqueue{pid = QPid}, ChPid, ConsumerTag, OkMsg) ->
ok = delegate_call(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}).
@@ -486,24 +487,12 @@ internal_delete(QueueName) ->
run_backing_queue(QPid, Mod, Fun) ->
gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
-sync_timeout(QPid) ->
- gen_server2:cast(QPid, sync_timeout).
-
-update_ram_duration(QPid) ->
- gen_server2:cast(QPid, update_ram_duration).
-
set_ram_duration_target(QPid, Duration) ->
gen_server2:cast(QPid, {set_ram_duration_target, Duration}).
set_maximum_since_use(QPid, Age) ->
gen_server2:cast(QPid, {set_maximum_since_use, Age}).
-maybe_expire(QPid) ->
- gen_server2:cast(QPid, maybe_expire).
-
-drop_expired(QPid) ->
- gen_server2:cast(QPid, drop_expired).
-
on_node_down(Node) ->
rabbit_misc:execute_mnesia_tx_with_tail(
fun () -> Dels = qlc:e(qlc:q([delete_queue(QueueName) ||
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index 4492bbd8..e3a2ca90 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -29,13 +29,11 @@
-export([start_link/1, info_keys/0]).
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
- handle_info/2, handle_pre_hibernate/1, prioritise_call/3,
- prioritise_cast/2, prioritise_info/2]).
-
-export([init_with_backing_queue_state/7]).
--export([format_message_queue/2]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, prioritise_call/3,
+ prioritise_cast/2, prioritise_info/2, format_message_queue/2]).
%% Queue's state
-record(q, {q,
@@ -44,7 +42,6 @@
backing_queue,
backing_queue_state,
active_consumers,
- blocked_consumers,
expires,
sync_timer_ref,
rate_timer_ref,
@@ -58,14 +55,30 @@
-record(consumer, {tag, ack_required}).
%% These are held in our process dictionary
--record(cr, {consumer_count,
- ch_pid,
- limiter_pid,
+-record(cr, {ch_pid,
monitor_ref,
acktags,
+ consumer_count,
+ blocked_consumers,
+ limiter,
is_limit_active,
unsent_message_count}).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/1 ::
+ (rabbit_types:amqqueue()) -> rabbit_types:ok_pid_or_error()).
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-spec(init_with_backing_queue_state/7 ::
+ (rabbit_types:amqqueue(), atom(), tuple(), any(), [any()],
+ [rabbit_types:delivery()], dict()) -> #q{}).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
-define(STATISTICS_KEYS,
[pid,
exclusive_consumer_pid,
@@ -75,8 +88,8 @@
messages,
consumers,
memory,
- backing_queue_status,
- slave_pids
+ slave_pids,
+ backing_queue_status
]).
-define(CREATION_EVENT_KEYS,
@@ -86,10 +99,12 @@
auto_delete,
arguments,
owner_pid,
- mirror_nodes
+ slave_pids,
+ synchronised_slave_pids
]).
--define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+-define(INFO_KEYS,
+ ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid, slave_pids]).
%%----------------------------------------------------------------------------
@@ -109,7 +124,6 @@ init(Q) ->
backing_queue = backing_queue_module(Q),
backing_queue_state = undefined,
active_consumers = queue:new(),
- blocked_consumers = queue:new(),
expires = undefined,
sync_timer_ref = undefined,
rate_timer_ref = undefined,
@@ -135,7 +149,6 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
backing_queue = BQ,
backing_queue_state = BQS,
active_consumers = queue:new(),
- blocked_consumers = queue:new(),
expires = undefined,
sync_timer_ref = undefined,
rate_timer_ref = RateTRef,
@@ -151,11 +164,13 @@ terminate(shutdown = R, State = #q{backing_queue = BQ}) ->
terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
-terminate(Reason, State = #q{backing_queue = BQ}) ->
+terminate(Reason, State = #q{q = #amqqueue{name = QName},
+ backing_queue = BQ}) ->
%% FIXME: How do we cancel active subscriptions?
terminate_shutdown(fun (BQS) ->
rabbit_event:notify(
- queue_deleted, [{pid, self()}]),
+ queue_deleted, [{pid, self()},
+ {name, QName}]),
BQS1 = BQ:delete_and_terminate(Reason, BQS),
%% don't care if the internal delete
%% doesn't return 'ok'.
@@ -251,8 +266,7 @@ backing_queue_module(#amqqueue{arguments = Args}) ->
end.
ensure_sync_timer(State = #q{sync_timer_ref = undefined}) ->
- {ok, TRef} = timer:apply_after(
- ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]),
+ TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync_timeout),
State#q{sync_timer_ref = TRef};
ensure_sync_timer(State) ->
State.
@@ -260,14 +274,12 @@ ensure_sync_timer(State) ->
stop_sync_timer(State = #q{sync_timer_ref = undefined}) ->
State;
stop_sync_timer(State = #q{sync_timer_ref = TRef}) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State#q{sync_timer_ref = undefined}.
ensure_rate_timer(State = #q{rate_timer_ref = undefined}) ->
- {ok, TRef} = timer:apply_after(
- ?RAM_DURATION_UPDATE_INTERVAL,
- rabbit_amqqueue, update_ram_duration,
- [self()]),
+ TRef = erlang:send_after(
+ ?RAM_DURATION_UPDATE_INTERVAL, self(), update_ram_duration),
State#q{rate_timer_ref = TRef};
ensure_rate_timer(State = #q{rate_timer_ref = just_measured}) ->
State#q{rate_timer_ref = undefined};
@@ -279,13 +291,13 @@ stop_rate_timer(State = #q{rate_timer_ref = undefined}) ->
stop_rate_timer(State = #q{rate_timer_ref = just_measured}) ->
State#q{rate_timer_ref = undefined};
stop_rate_timer(State = #q{rate_timer_ref = TRef}) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State#q{rate_timer_ref = undefined}.
stop_expiry_timer(State = #q{expiry_timer_ref = undefined}) ->
State;
stop_expiry_timer(State = #q{expiry_timer_ref = TRef}) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State#q{expiry_timer_ref = undefined}.
%% We wish to expire only when there are no consumers *and* the expiry
@@ -297,18 +309,16 @@ ensure_expiry_timer(State = #q{expires = Expires}) ->
case is_unused(State) of
true ->
NewState = stop_expiry_timer(State),
- {ok, TRef} = timer:apply_after(
- Expires, rabbit_amqqueue, maybe_expire, [self()]),
+ TRef = erlang:send_after(Expires, self(), maybe_expire),
NewState#q{expiry_timer_ref = TRef};
false ->
State
end.
ensure_stats_timer(State = #q{stats_timer = StatsTimer,
- q = Q}) ->
+ q = #amqqueue{pid = QPid}}) ->
State#q{stats_timer = rabbit_event:ensure_stats_timer(
- StatsTimer,
- fun() -> rabbit_amqqueue:emit_stats(Q) end)}.
+ StatsTimer, QPid, emit_stats)}.
assert_invariant(#q{active_consumers = AC,
backing_queue = BQ, backing_queue_state = BQS}) ->
@@ -324,40 +334,55 @@ ch_record(ChPid) ->
Key = {ch, ChPid},
case get(Key) of
undefined -> MonitorRef = erlang:monitor(process, ChPid),
- C = #cr{consumer_count = 0,
- ch_pid = ChPid,
+ C = #cr{ch_pid = ChPid,
monitor_ref = MonitorRef,
acktags = sets:new(),
+ consumer_count = 0,
+ blocked_consumers = queue:new(),
is_limit_active = false,
+ limiter = rabbit_limiter:make_token(),
unsent_message_count = 0},
put(Key, C),
C;
C = #cr{} -> C
end.
-store_ch_record(C = #cr{ch_pid = ChPid}) ->
- put({ch, ChPid}, C).
-
-maybe_store_ch_record(C = #cr{consumer_count = ConsumerCount,
- acktags = ChAckTags,
- unsent_message_count = UnsentMessageCount}) ->
+update_ch_record(C = #cr{consumer_count = ConsumerCount,
+ acktags = ChAckTags,
+ unsent_message_count = UnsentMessageCount}) ->
case {sets:size(ChAckTags), ConsumerCount, UnsentMessageCount} of
- {0, 0, 0} -> ok = erase_ch_record(C),
- false;
- _ -> store_ch_record(C),
- true
- end.
+ {0, 0, 0} -> ok = erase_ch_record(C);
+ _ -> ok = store_ch_record(C)
+ end,
+ C.
+
+store_ch_record(C = #cr{ch_pid = ChPid}) ->
+ put({ch, ChPid}, C),
+ ok.
erase_ch_record(#cr{ch_pid = ChPid,
- limiter_pid = LimiterPid,
+ limiter = Limiter,
monitor_ref = MonitorRef}) ->
- ok = rabbit_limiter:unregister(LimiterPid, self()),
+ ok = rabbit_limiter:unregister(Limiter, self()),
erlang:demonitor(MonitorRef),
erase({ch, ChPid}),
ok.
+update_consumer_count(C = #cr{consumer_count = 0, limiter = Limiter}, +1) ->
+ ok = rabbit_limiter:register(Limiter, self()),
+ update_ch_record(C#cr{consumer_count = 1});
+update_consumer_count(C = #cr{consumer_count = 1, limiter = Limiter}, -1) ->
+ ok = rabbit_limiter:unregister(Limiter, self()),
+ update_ch_record(C#cr{consumer_count = 0,
+ limiter = rabbit_limiter:make_token()});
+update_consumer_count(C = #cr{consumer_count = Count}, Delta) ->
+ update_ch_record(C#cr{consumer_count = Count + Delta}).
+
all_ch_record() -> [C || {{ch, _}, C} <- get()].
+block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) ->
+ update_ch_record(C#cr{blocked_consumers = queue:in(QEntry, Blocked)}).
+
is_ch_blocked(#cr{unsent_message_count = Count, is_limit_active = Limited}) ->
Limited orelse Count >= ?UNSENT_MESSAGE_LIMIT.
@@ -369,67 +394,56 @@ ch_record_state_transition(OldCR, NewCR) ->
end.
deliver_msgs_to_consumers(Funs = {PredFun, DeliverFun}, FunAcc,
- State = #q{q = #amqqueue{name = QName},
- active_consumers = ActiveConsumers,
- blocked_consumers = BlockedConsumers}) ->
- case queue:out(ActiveConsumers) of
- {{value, QEntry = {ChPid, #consumer{tag = ConsumerTag,
- ack_required = AckRequired}}},
- ActiveConsumersTail} ->
- C = #cr{limiter_pid = LimiterPid,
- unsent_message_count = Count,
- acktags = ChAckTags} = ch_record(ChPid),
- IsMsgReady = PredFun(FunAcc, State),
- case (IsMsgReady andalso
- rabbit_limiter:can_send( LimiterPid, self(), AckRequired )) of
- true ->
- {{Message, IsDelivered, AckTag}, FunAcc1, State1} =
- DeliverFun(AckRequired, FunAcc, State),
- rabbit_channel:deliver(
- ChPid, ConsumerTag, AckRequired,
- {QName, self(), AckTag, IsDelivered, Message}),
- ChAckTags1 =
- case AckRequired of
- true -> sets:add_element(AckTag, ChAckTags);
- false -> ChAckTags
- end,
- NewC = C#cr{unsent_message_count = Count + 1,
- acktags = ChAckTags1},
- true = maybe_store_ch_record(NewC),
- {NewActiveConsumers, NewBlockedConsumers} =
- case ch_record_state_transition(C, NewC) of
- ok -> {queue:in(QEntry, ActiveConsumersTail),
- BlockedConsumers};
- block -> {ActiveConsumers1, BlockedConsumers1} =
- move_consumers(ChPid,
- ActiveConsumersTail,
- BlockedConsumers),
- {ActiveConsumers1,
- queue:in(QEntry, BlockedConsumers1)}
- end,
- State2 = State1#q{
- active_consumers = NewActiveConsumers,
- blocked_consumers = NewBlockedConsumers},
- deliver_msgs_to_consumers(Funs, FunAcc1, State2);
- %% if IsMsgReady then we've hit the limiter
- false when IsMsgReady ->
- true = maybe_store_ch_record(C#cr{is_limit_active = true}),
- {NewActiveConsumers, NewBlockedConsumers} =
- move_consumers(ChPid,
- ActiveConsumers,
- BlockedConsumers),
- deliver_msgs_to_consumers(
- Funs, FunAcc,
- State#q{active_consumers = NewActiveConsumers,
- blocked_consumers = NewBlockedConsumers});
- false ->
- %% no message was ready, so we don't need to block anyone
- {FunAcc, State}
- end;
- {empty, _} ->
- {FunAcc, State}
+ State = #q{active_consumers = ActiveConsumers}) ->
+ case PredFun(FunAcc, State) of
+ false -> {FunAcc, State};
+ true -> case queue:out(ActiveConsumers) of
+ {empty, _} ->
+ {FunAcc, State};
+ {{value, QEntry}, Tail} ->
+ {FunAcc1, State1} =
+ deliver_msg_to_consumer(
+ DeliverFun, QEntry,
+ FunAcc, State#q{active_consumers = Tail}),
+ deliver_msgs_to_consumers(Funs, FunAcc1, State1)
+ end
end.
+deliver_msg_to_consumer(DeliverFun, E = {ChPid, Consumer}, FunAcc, State) ->
+ C = ch_record(ChPid),
+ case is_ch_blocked(C) of
+ true -> block_consumer(C, E),
+ {FunAcc, State};
+ false -> case rabbit_limiter:can_send(C#cr.limiter, self(),
+ Consumer#consumer.ack_required) of
+ false -> block_consumer(C#cr{is_limit_active = true}, E),
+ {FunAcc, State};
+ true -> AC1 = queue:in(E, State#q.active_consumers),
+ deliver_msg_to_consumer(
+ DeliverFun, Consumer, C, FunAcc,
+ State#q{active_consumers = AC1})
+ end
+ end.
+
+deliver_msg_to_consumer(DeliverFun,
+ #consumer{tag = ConsumerTag,
+ ack_required = AckRequired},
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ unsent_message_count = Count},
+ FunAcc, State = #q{q = #amqqueue{name = QName}}) ->
+ {{Message, IsDelivered, AckTag}, FunAcc1, State1} =
+ DeliverFun(AckRequired, FunAcc, State),
+ rabbit_channel:deliver(ChPid, ConsumerTag, AckRequired,
+ {QName, self(), AckTag, IsDelivered, Message}),
+ ChAckTags1 = case AckRequired of
+ true -> sets:add_element(AckTag, ChAckTags);
+ false -> ChAckTags
+ end,
+ update_ch_record(C#cr{acktags = ChAckTags1,
+ unsent_message_count = Count + 1}),
+ {FunAcc1, State1}.
+
deliver_from_queue_pred(IsEmpty, _State) -> not IsEmpty.
deliver_from_queue_deliver(AckRequired, false, State) ->
@@ -547,11 +561,9 @@ deliver_or_enqueue(Delivery = #delivery{message = Message,
maybe_record_confirm_message(Confirm, State1),
case Delivered of
true -> State2;
- false -> BQS1 =
- BQ:publish(Message,
- (message_properties(State)) #message_properties{
- needs_confirming = needs_confirming(Confirm)},
- ChPid, BQS),
+ false -> Props = (message_properties(State)) #message_properties{
+ needs_confirming = needs_confirming(Confirm)},
+ BQS1 = BQ:publish(Message, Props, ChPid, BQS),
ensure_ttl_timer(State2#q{backing_queue_state = BQS1})
end.
@@ -568,44 +580,34 @@ fetch(AckRequired, State = #q{backing_queue_state = BQS,
{Result, BQS1} = BQ:fetch(AckRequired, BQS),
{Result, State#q{backing_queue_state = BQS1}}.
-add_consumer(ChPid, Consumer, Queue) -> queue:in({ChPid, Consumer}, Queue).
-
remove_consumer(ChPid, ConsumerTag, Queue) ->
- queue:filter(fun ({CP, #consumer{tag = CT}}) ->
- (CP /= ChPid) or (CT /= ConsumerTag)
+ queue:filter(fun ({CP, #consumer{tag = CTag}}) ->
+ (CP /= ChPid) or (CTag /= ConsumerTag)
end, Queue).
remove_consumers(ChPid, Queue) ->
- {Kept, Removed} = split_by_channel(ChPid, Queue),
- [emit_consumer_deleted(Ch, CTag) ||
- {Ch, #consumer{tag = CTag}} <- queue:to_list(Removed)],
- Kept.
-
-move_consumers(ChPid, From, To) ->
- {Kept, Removed} = split_by_channel(ChPid, From),
- {Kept, queue:join(To, Removed)}.
-
-split_by_channel(ChPid, Queue) ->
- {Kept, Removed} = lists:partition(fun ({CP, _}) -> CP /= ChPid end,
- queue:to_list(Queue)),
- {queue:from_list(Kept), queue:from_list(Removed)}.
+ queue:filter(fun ({CP, #consumer{tag = CTag}}) when CP =:= ChPid ->
+ emit_consumer_deleted(ChPid, CTag),
+ false;
+ (_) ->
+ true
+ end, Queue).
possibly_unblock(State, ChPid, Update) ->
case lookup_ch(ChPid) of
not_found ->
State;
C ->
- NewC = Update(C),
- maybe_store_ch_record(NewC),
- case ch_record_state_transition(C, NewC) of
- ok -> State;
- unblock -> {NewBlockedConsumers, NewActiveConsumers} =
- move_consumers(ChPid,
- State#q.blocked_consumers,
- State#q.active_consumers),
- run_message_queue(
- State#q{active_consumers = NewActiveConsumers,
- blocked_consumers = NewBlockedConsumers})
+ C1 = Update(C),
+ case ch_record_state_transition(C, C1) of
+ ok -> update_ch_record(C1),
+ State;
+ unblock -> #cr{blocked_consumers = Consumers} = C1,
+ update_ch_record(
+ C1#cr{blocked_consumers = queue:new()}),
+ AC1 = queue:join(State#q.active_consumers,
+ Consumers),
+ run_message_queue(State#q{active_consumers = AC1})
end
end.
@@ -617,7 +619,10 @@ handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder}) ->
case lookup_ch(DownPid) of
not_found ->
{ok, State};
- C = #cr{ch_pid = ChPid, acktags = ChAckTags} ->
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ blocked_consumers = Blocked} ->
+ _ = remove_consumers(ChPid, Blocked), %% for stats emission
ok = erase_ch_record(C),
State1 = State#q{
exclusive_consumer = case Holder of
@@ -625,9 +630,7 @@ handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder}) ->
Other -> Other
end,
active_consumers = remove_consumers(
- ChPid, State#q.active_consumers),
- blocked_consumers = remove_consumers(
- ChPid, State#q.blocked_consumers)},
+ ChPid, State#q.active_consumers)},
case should_auto_delete(State1) of
true -> {stop, State1};
false -> {ok, requeue_and_run(sets:to_list(ChAckTags),
@@ -635,11 +638,6 @@ handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder}) ->
end
end.
-cancel_holder(ChPid, ConsumerTag, {ChPid, ConsumerTag}) ->
- none;
-cancel_holder(_ChPid, _ConsumerTag, Holder) ->
- Holder.
-
check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) ->
in_use;
check_exclusive_access(none, false, _State) ->
@@ -650,8 +648,15 @@ check_exclusive_access(none, true, State) ->
false -> in_use
end.
-is_unused(State) -> queue:is_empty(State#q.active_consumers) andalso
- queue:is_empty(State#q.blocked_consumers).
+consumer_count() -> consumer_count(fun (_) -> false end).
+
+active_consumer_count() -> consumer_count(fun is_ch_blocked/1).
+
+consumer_count(Exclude) ->
+ lists:sum([Count || C = #cr{consumer_count = Count} <- all_ch_record(),
+ not Exclude(C)]).
+
+is_unused(_State) -> consumer_count() == 0.
maybe_send_reply(_ChPid, undefined) -> ok;
maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
@@ -665,8 +670,15 @@ run_backing_queue(Mod, Fun, State = #q{backing_queue = BQ,
backing_queue_state = BQS}) ->
run_message_queue(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)}).
-subtract_acks(A, B) when is_list(B) ->
- lists:foldl(fun sets:del_element/2, A, B).
+subtract_acks(ChPid, AckTags, State, Fun) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ State;
+ C = #cr{acktags = ChAckTags} ->
+ update_ch_record(C#cr{acktags = lists:foldl(fun sets:del_element/2,
+ ChAckTags, AckTags)}),
+ Fun(State)
+ end.
discard_delivery(#delivery{sender = ChPid,
message = Message},
@@ -702,8 +714,7 @@ ensure_ttl_timer(State = #q{backing_queue = BQ,
when TTL =/= undefined ->
case BQ:is_empty(BQS) of
true -> State;
- false -> TRef = timer:apply_after(TTL, rabbit_amqqueue, drop_expired,
- [self()]),
+ false -> TRef = erlang:send_after(TTL, self(), drop_expired),
State#q{ttl_timer_ref = TRef}
end;
ensure_ttl_timer(State) ->
@@ -711,7 +722,40 @@ ensure_ttl_timer(State) ->
now_micros() -> timer:now_diff(now(), {0,0,0}).
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+infos(Items, State) ->
+ {Prefix, Items1} =
+ case lists:member(synchronised_slave_pids, Items) of
+ true -> Prefix1 = slaves_status(State),
+ case lists:member(slave_pids, Items) of
+ true -> {Prefix1, Items -- [slave_pids]};
+ false -> {proplists:delete(slave_pids, Prefix1), Items}
+ end;
+ false -> {[], Items}
+ end,
+ Prefix ++ [{Item, i(Item, State)}
+ || Item <- (Items1 -- [synchronised_slave_pids])].
+
+slaves_status(#q{q = #amqqueue{name = Name}}) ->
+ {ok, #amqqueue{mirror_nodes = MNodes, slave_pids = SPids}} =
+ rabbit_amqqueue:lookup(Name),
+ case MNodes of
+ undefined ->
+ [{slave_pids, ''}, {synchronised_slave_pids, ''}];
+ _ ->
+ {Results, _Bad} =
+ delegate:invoke(
+ SPids, fun (Pid) -> rabbit_mirror_queue_slave:info(Pid) end),
+ {SPids1, SSPids} =
+ lists:foldl(
+ fun ({Pid, Infos}, {SPidsN, SSPidsN}) ->
+ {[Pid | SPidsN],
+ case proplists:get_bool(is_synchronised, Infos) of
+ true -> [Pid | SSPidsN];
+ false -> SSPidsN
+ end}
+ end, {[], []}, Results),
+ [{slave_pids, SPids1}, {synchronised_slave_pids, SSPids}]
+ end.
i(name, #q{q = #amqqueue{name = Name}}) -> Name;
i(durable, #q{q = #amqqueue{durable = Durable}}) -> Durable;
@@ -738,29 +782,32 @@ i(messages_unacknowledged, _) ->
i(messages, State) ->
lists:sum([i(Item, State) || Item <- [messages_ready,
messages_unacknowledged]]);
-i(consumers, State) ->
- queue:len(State#q.active_consumers) + queue:len(State#q.blocked_consumers);
+i(consumers, _) ->
+ consumer_count();
i(memory, _) ->
{memory, M} = process_info(self(), memory),
M;
+i(slave_pids, #q{q = #amqqueue{name = Name}}) ->
+ {ok, #amqqueue{mirror_nodes = MNodes,
+ slave_pids = SPids}} = rabbit_amqqueue:lookup(Name),
+ case MNodes of
+ undefined -> [];
+ _ -> SPids
+ end;
i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
BQ:status(BQS);
-i(slave_pids, #q{q = #amqqueue{name = Name}}) ->
- {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(Name),
- SPids;
-i(mirror_nodes, #q{q = #amqqueue{name = Name}}) ->
- {ok, #amqqueue{mirror_nodes = MNodes}} = rabbit_amqqueue:lookup(Name),
- MNodes;
i(Item, _) ->
throw({bad_argument, Item}).
-consumers(#q{active_consumers = ActiveConsumers,
- blocked_consumers = BlockedConsumers}) ->
+consumers(#q{active_consumers = ActiveConsumers}) ->
+ lists:foldl(fun (C, Acc) -> consumers(C#cr.blocked_consumers, Acc) end,
+ consumers(ActiveConsumers, []), all_ch_record()).
+
+consumers(Consumers, Acc) ->
rabbit_misc:queue_fold(
- fun ({ChPid, #consumer{tag = ConsumerTag,
- ack_required = AckRequired}}, Acc) ->
- [{ChPid, ConsumerTag, AckRequired} | Acc]
- end, [], queue:join(ActiveConsumers, BlockedConsumers)).
+ fun ({ChPid, #consumer{tag = CTag, ack_required = AckRequired}}, Acc1) ->
+ [{ChPid, CTag, AckRequired} | Acc1]
+ end, Acc, Consumers).
emit_stats(State) ->
emit_stats(State, []).
@@ -786,33 +833,38 @@ emit_consumer_deleted(ChPid, ConsumerTag) ->
prioritise_call(Msg, _From, _State) ->
case Msg of
- info -> 9;
- {info, _Items} -> 9;
- consumers -> 9;
- _ -> 0
+ info -> 9;
+ {info, _Items} -> 9;
+ consumers -> 9;
+ {basic_consume, _, _, _, _, _, _} -> 7;
+ {basic_cancel, _, _, _} -> 7;
+ stat -> 7;
+ _ -> 0
end.
prioritise_cast(Msg, _State) ->
case Msg of
- update_ram_duration -> 8;
delete_immediately -> 8;
{set_ram_duration_target, _Duration} -> 8;
{set_maximum_since_use, _Age} -> 8;
- maybe_expire -> 8;
- drop_expired -> 8;
- emit_stats -> 7;
{ack, _AckTags, _ChPid} -> 7;
{reject, _AckTags, _Requeue, _ChPid} -> 7;
{notify_sent, _ChPid} -> 7;
{unblock, _ChPid} -> 7;
{run_backing_queue, _Mod, _Fun} -> 6;
- sync_timeout -> 6;
_ -> 0
end.
-prioritise_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
- #q{q = #amqqueue{exclusive_owner = DownPid}}) -> 8;
-prioritise_info(_Msg, _State) -> 0.
+prioritise_info(Msg, #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
+ case Msg of
+ {'DOWN', _, process, DownPid, _} -> 8;
+ update_ram_duration -> 8;
+ maybe_expire -> 8;
+ drop_expired -> 8;
+ emit_stats -> 7;
+ sync_timeout -> 6;
+ _ -> 0
+ end.
handle_call({init, Recover}, From,
State = #q{q = #amqqueue{exclusive_owner = none}}) ->
@@ -895,10 +947,8 @@ handle_call({basic_get, ChPid, NoAck}, _From,
State3 =
case AckRequired of
true -> C = #cr{acktags = ChAckTags} = ch_record(ChPid),
- true = maybe_store_ch_record(
- C#cr{acktags =
- sets:add_element(AckTag,
- ChAckTags)}),
+ ChAckTags1 = sets:add_element(AckTag, ChAckTags),
+ update_ch_record(C#cr{acktags = ChAckTags1}),
State2;
false -> State2
end,
@@ -906,7 +956,7 @@ handle_call({basic_get, ChPid, NoAck}, _From,
reply({ok, Remaining, Msg}, State3)
end;
-handle_call({basic_consume, NoAck, ChPid, LimiterPid,
+handle_call({basic_consume, NoAck, ChPid, Limiter,
ConsumerTag, ExclusiveConsume, OkMsg},
_From, State = #q{exclusive_consumer = ExistingHolder}) ->
case check_exclusive_access(ExistingHolder, ExclusiveConsume,
@@ -914,32 +964,24 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid,
in_use ->
reply({error, exclusive_consume_unavailable}, State);
ok ->
- C = #cr{consumer_count = ConsumerCount} = ch_record(ChPid),
+ C = ch_record(ChPid),
+ C1 = update_consumer_count(C#cr{limiter = Limiter}, +1),
Consumer = #consumer{tag = ConsumerTag,
ack_required = not NoAck},
- true = maybe_store_ch_record(C#cr{consumer_count = ConsumerCount +1,
- limiter_pid = LimiterPid}),
- ok = case ConsumerCount of
- 0 -> rabbit_limiter:register(LimiterPid, self());
- _ -> ok
- end,
ExclusiveConsumer = if ExclusiveConsume -> {ChPid, ConsumerTag};
true -> ExistingHolder
end,
State1 = State#q{has_had_consumers = true,
exclusive_consumer = ExclusiveConsumer},
ok = maybe_send_reply(ChPid, OkMsg),
+ E = {ChPid, Consumer},
State2 =
- case is_ch_blocked(C) of
- true -> State1#q{
- blocked_consumers =
- add_consumer(ChPid, Consumer,
- State1#q.blocked_consumers)};
- false -> run_message_queue(
- State1#q{
- active_consumers =
- add_consumer(ChPid, Consumer,
- State1#q.active_consumers)})
+ case is_ch_blocked(C1) of
+ true -> block_consumer(C1, E),
+ State1;
+ false -> update_ch_record(C1),
+ AC1 = queue:in(E, State1#q.active_consumers),
+ run_message_queue(State1#q{active_consumers = AC1})
end,
emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
not NoAck),
@@ -948,42 +990,32 @@ handle_call({basic_consume, NoAck, ChPid, LimiterPid,
handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From,
State = #q{exclusive_consumer = Holder}) ->
+ ok = maybe_send_reply(ChPid, OkMsg),
case lookup_ch(ChPid) of
not_found ->
- ok = maybe_send_reply(ChPid, OkMsg),
reply(ok, State);
- C = #cr{consumer_count = ConsumerCount,
- limiter_pid = LimiterPid} ->
- C1 = C#cr{consumer_count = ConsumerCount -1},
- maybe_store_ch_record(
- case ConsumerCount of
- 1 -> ok = rabbit_limiter:unregister(LimiterPid, self()),
- C1#cr{limiter_pid = undefined};
- _ -> C1
- end),
+ C = #cr{blocked_consumers = Blocked} ->
emit_consumer_deleted(ChPid, ConsumerTag),
- ok = maybe_send_reply(ChPid, OkMsg),
- NewState =
- State#q{exclusive_consumer = cancel_holder(ChPid,
- ConsumerTag,
- Holder),
- active_consumers = remove_consumer(
- ChPid, ConsumerTag,
- State#q.active_consumers),
- blocked_consumers = remove_consumer(
+ Blocked1 = remove_consumer(ChPid, ConsumerTag, Blocked),
+ update_consumer_count(C#cr{blocked_consumers = Blocked1}, -1),
+ State1 = State#q{
+ exclusive_consumer = case Holder of
+ {ChPid, ConsumerTag} -> none;
+ _ -> Holder
+ end,
+ active_consumers = remove_consumer(
ChPid, ConsumerTag,
- State#q.blocked_consumers)},
- case should_auto_delete(NewState) of
- false -> reply(ok, ensure_expiry_timer(NewState));
- true -> {stop, normal, ok, NewState}
+ State#q.active_consumers)},
+ case should_auto_delete(State1) of
+ false -> reply(ok, ensure_expiry_timer(State1));
+ true -> {stop, normal, ok, State1}
end
end;
handle_call(stat, _From, State) ->
- State1 = #q{backing_queue = BQ, backing_queue_state = BQS,
- active_consumers = ActiveConsumers} =
+ State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
drop_expired_messages(ensure_expiry_timer(State)),
- reply({ok, BQ:len(BQS), queue:len(ActiveConsumers)}, State1);
+ reply({ok, BQ:len(BQS), active_consumer_count()}, State1);
handle_call({delete, IfUnused, IfEmpty}, _From,
State = #q{backing_queue_state = BQS, backing_queue = BQ}) ->
@@ -1005,52 +1037,37 @@ handle_call(purge, _From, State = #q{backing_queue = BQ,
handle_call({requeue, AckTags, ChPid}, From, State) ->
gen_server2:reply(From, ok),
- case lookup_ch(ChPid) of
- not_found ->
- noreply(State);
- C = #cr{acktags = ChAckTags} ->
- ChAckTags1 = subtract_acks(ChAckTags, AckTags),
- maybe_store_ch_record(C#cr{acktags = ChAckTags1}),
- noreply(requeue_and_run(AckTags, State))
- end.
+ noreply(subtract_acks(
+ ChPid, AckTags, State,
+ fun (State1) -> requeue_and_run(AckTags, State1) end)).
handle_cast({run_backing_queue, Mod, Fun}, State) ->
noreply(run_backing_queue(Mod, Fun, State));
-handle_cast(sync_timeout, State) ->
- noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
-
handle_cast({deliver, Delivery}, State) ->
%% Asynchronous, non-"mandatory", non-"immediate" deliver mode.
noreply(deliver_or_enqueue(Delivery, State));
-handle_cast({ack, AckTags, ChPid},
- State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
- case lookup_ch(ChPid) of
- not_found ->
- noreply(State);
- C = #cr{acktags = ChAckTags} ->
- maybe_store_ch_record(C#cr{acktags = subtract_acks(
- ChAckTags, AckTags)}),
- {_Guids, BQS1} = BQ:ack(AckTags, BQS),
- noreply(State#q{backing_queue_state = BQS1})
- end;
-
-handle_cast({reject, AckTags, Requeue, ChPid},
- State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- case lookup_ch(ChPid) of
- not_found ->
- noreply(State);
- C = #cr{acktags = ChAckTags} ->
- ChAckTags1 = subtract_acks(ChAckTags, AckTags),
- maybe_store_ch_record(C#cr{acktags = ChAckTags1}),
- noreply(case Requeue of
- true -> requeue_and_run(AckTags, State);
- false -> {_Guids, BQS1} = BQ:ack(AckTags, BQS),
- State#q{backing_queue_state = BQS1}
- end)
- end;
+handle_cast({ack, AckTags, ChPid}, State) ->
+ noreply(subtract_acks(
+ ChPid, AckTags, State,
+ fun (State1 = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {_Guids, BQS1} = BQ:ack(AckTags, BQS),
+ State1#q{backing_queue_state = BQS1}
+ end));
+
+handle_cast({reject, AckTags, Requeue, ChPid}, State) ->
+ noreply(subtract_acks(
+ ChPid, AckTags, State,
+ fun (State1 = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ case Requeue of
+ true -> requeue_and_run(AckTags, State1);
+ false -> {_Guids, BQS1} = BQ:ack(AckTags, BQS),
+ State1#q{backing_queue_state = BQS1}
+ end
+ end));
handle_cast(delete_immediately, State) ->
{stop, normal, State};
@@ -1067,35 +1084,26 @@ handle_cast({notify_sent, ChPid}, State) ->
C#cr{unsent_message_count = Count - 1}
end));
-handle_cast({limit, ChPid, LimiterPid}, State) ->
+handle_cast({limit, ChPid, Limiter}, State) ->
noreply(
possibly_unblock(
State, ChPid,
- fun (C = #cr{consumer_count = ConsumerCount,
- limiter_pid = OldLimiterPid,
- is_limit_active = Limited}) ->
- if ConsumerCount =/= 0 andalso OldLimiterPid == undefined ->
- ok = rabbit_limiter:register(LimiterPid, self());
- true ->
- ok
+ fun (C = #cr{consumer_count = ConsumerCount,
+ limiter = OldLimiter,
+ is_limit_active = OldLimited}) ->
+ case (ConsumerCount =/= 0 andalso
+ not rabbit_limiter:is_enabled(OldLimiter)) of
+ true -> ok = rabbit_limiter:register(Limiter, self());
+ false -> ok
end,
- NewLimited = Limited andalso LimiterPid =/= undefined,
- C#cr{limiter_pid = LimiterPid, is_limit_active = NewLimited}
+ Limited = OldLimited andalso rabbit_limiter:is_enabled(Limiter),
+ C#cr{limiter = Limiter, is_limit_active = Limited}
end));
handle_cast({flush, ChPid}, State) ->
ok = rabbit_channel:flushed(ChPid, self()),
noreply(State);
-handle_cast(update_ram_duration, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {RamDuration, BQS1} = BQ:ram_duration(BQS),
- DesiredDuration =
- rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
- BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
- noreply(State#q{rate_timer_ref = just_measured,
- backing_queue_state = BQS2});
-
handle_cast({set_ram_duration_target, Duration},
State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
BQS1 = BQ:set_ram_duration_target(Duration, BQS),
@@ -1105,22 +1113,32 @@ handle_cast({set_maximum_since_use, Age}, State) ->
ok = file_handle_cache:set_maximum_since_use(Age),
noreply(State);
-handle_cast(maybe_expire, State) ->
+handle_cast(force_event_refresh, State = #q{exclusive_consumer = Exclusive}) ->
+ rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State)),
+ case Exclusive of
+ none -> [emit_consumer_created(Ch, CTag, false, AckRequired) ||
+ {Ch, CTag, AckRequired} <- consumers(State)];
+ {Ch, CTag} -> [{Ch, CTag, AckRequired}] = consumers(State),
+ emit_consumer_created(Ch, CTag, true, AckRequired)
+ end,
+ noreply(State).
+
+handle_info(maybe_expire, State) ->
case is_unused(State) of
true -> ?LOGDEBUG("Queue lease expired for ~p~n", [State#q.q]),
{stop, normal, State};
false -> noreply(ensure_expiry_timer(State))
end;
-handle_cast(drop_expired, State) ->
+handle_info(drop_expired, State) ->
noreply(drop_expired_messages(State#q{ttl_timer_ref = undefined}));
-handle_cast(emit_stats, State = #q{stats_timer = StatsTimer}) ->
+handle_info(emit_stats, State = #q{stats_timer = StatsTimer}) ->
%% Do not invoke noreply as it would see no timer and create a new one.
emit_stats(State),
State1 = State#q{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)},
assert_invariant(State1),
- {noreply, State1, hibernate}.
+ {noreply, State1, hibernate};
handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
State = #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
@@ -1137,6 +1155,18 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
{stop, NewState} -> {stop, normal, NewState}
end;
+handle_info(update_ram_duration, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ noreply(State#q{rate_timer_ref = just_measured,
+ backing_queue_state = BQS2});
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
+
handle_info(timeout, State) ->
noreply(backing_queue_timeout(State));
@@ -1157,23 +1187,11 @@ handle_pre_hibernate(State = #q{backing_queue = BQ,
rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
BQS3 = BQ:handle_pre_hibernate(BQS2),
- rabbit_event:if_enabled(StatsTimer,
- fun () ->
- emit_stats(State, [{idle_since, now()}])
- end),
+ rabbit_event:if_enabled(
+ StatsTimer,
+ fun () -> emit_stats(State, [{idle_since, now()}]) end),
State1 = State#q{stats_timer = rabbit_event:stop_stats_timer(StatsTimer),
backing_queue_state = BQS3},
{hibernate, stop_rate_timer(State1)}.
-format_message_queue(_Opt, Mailbox) ->
- Len = priority_queue:len(Mailbox),
- {Len,
- case Len > 100 of
- false -> priority_queue:to_list(Mailbox);
- true -> {summary,
- orddict:to_list(
- lists:foldl(
- fun ({P, _V}, Counts) ->
- orddict:update_counter(P, 1, Counts)
- end, orddict:new(), priority_queue:to_list(Mailbox)))}
- end}.
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl
index 2c28adce..7b3ebcf2 100644
--- a/src/rabbit_amqqueue_sup.erl
+++ b/src/rabbit_amqqueue_sup.erl
@@ -26,6 +26,20 @@
-define(SERVER, ?MODULE).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_child/2 ::
+ (node(), [any()]) -> rabbit_types:ok(pid() | undefined) |
+ rabbit_types:ok({pid(), any()}) |
+ rabbit_types:error(any())).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
start_link() ->
supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl
index 6a018bd1..086a90b4 100644
--- a/src/rabbit_auth_backend_internal.erl
+++ b/src/rabbit_auth_backend_internal.erl
@@ -110,17 +110,13 @@ internal_check_user_login(Username, Fun) ->
Refused
end.
-check_vhost_access(#user{username = Username}, VHost) ->
- %% TODO: use dirty ops instead
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:read({rabbit_user_permission,
- #user_vhost{username = Username,
- virtual_host = VHost}}) of
- [] -> false;
- [_R] -> true
- end
- end).
+check_vhost_access(#user{username = Username}, VHostPath) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] -> false;
+ [_R] -> true
+ end.
check_resource_access(#user{username = Username},
#resource{virtual_host = VHostPath, name = Name},
@@ -150,6 +146,7 @@ permission_index(read) -> #permission.read.
%% Manipulation of the user database
add_user(Username, Password) ->
+ rabbit_log:info("Creating user '~s'~n", [Username]),
R = rabbit_misc:execute_mnesia_transaction(
fun () ->
case mnesia:wread({rabbit_user, Username}) of
@@ -165,10 +162,10 @@ add_user(Username, Password) ->
mnesia:abort({user_already_exists, Username})
end
end),
- rabbit_log:info("Created user ~p~n", [Username]),
R.
delete_user(Username) ->
+ rabbit_log:info("Deleting user '~s'~n", [Username]),
R = rabbit_misc:execute_mnesia_transaction(
rabbit_misc:with_user(
Username,
@@ -185,13 +182,14 @@ delete_user(Username) ->
write)],
ok
end)),
- rabbit_log:info("Deleted user ~p~n", [Username]),
R.
change_password(Username, Password) ->
+ rabbit_log:info("Changing password for '~s'~n", [Username]),
change_password_hash(Username, hash_password(Password)).
clear_password(Username) ->
+ rabbit_log:info("Clearing password for '~s'~n", [Username]),
change_password_hash(Username, <<"">>).
change_password_hash(Username, PasswordHash) ->
@@ -199,7 +197,6 @@ change_password_hash(Username, PasswordHash) ->
User#internal_user{
password_hash = PasswordHash }
end),
- rabbit_log:info("Changed password for user ~p~n", [Username]),
R.
hash_password(Cleartext) ->
@@ -221,11 +218,10 @@ salted_md5(Salt, Cleartext) ->
erlang:md5(Salted).
set_tags(Username, Tags) ->
+ rabbit_log:info("Setting user tags for user '~s' to ~p~n", [Username, Tags]),
R = update_user(Username, fun(User) ->
User#internal_user{tags = Tags}
end),
- rabbit_log:info("Set user tags for user ~p to ~p~n",
- [Username, Tags]),
R.
update_user(Username, Fun) ->
@@ -255,6 +251,8 @@ validate_regexp(RegexpBin) ->
end.
set_permissions(Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm) ->
+ rabbit_log:info("Setting permissions for '~s' in '~s' to '~s', '~s', '~s'~n",
+ [Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm]),
lists:map(fun validate_regexp/1, [ConfigurePerm, WritePerm, ReadPerm]),
rabbit_misc:execute_mnesia_transaction(
rabbit_misc:with_user_and_vhost(
diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl
index 1203fb45..586ab6c7 100644
--- a/src/rabbit_backing_queue_qc.erl
+++ b/src/rabbit_backing_queue_qc.erl
@@ -108,7 +108,7 @@ qc_publish(#state{bqstate = BQ}) ->
[qc_message(),
#message_properties{needs_confirming = frequency([{1, true},
{20, false}]),
- expiry = choose(0, 10)},
+ expiry = oneof([undefined | lists:seq(1, 10)])},
self(), BQ]}.
qc_publish_multiple(#state{bqstate = BQ}) ->
@@ -386,7 +386,7 @@ rand_choice(List) -> [lists:nth(random:uniform(length(List)), List)].
dropfun(Props) ->
Expiry = eval({call, erlang, element,
[?RECORD_INDEX(expiry, message_properties), Props]}),
- Expiry =/= 0.
+ Expiry =/= 1.
drop_messages(Messages) ->
case gb_trees:is_empty(Messages) of
diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl
index 9cc406e7..b266d366 100644
--- a/src/rabbit_basic.erl
+++ b/src/rabbit_basic.erl
@@ -18,8 +18,8 @@
-include("rabbit.hrl").
-include("rabbit_framing.hrl").
--export([publish/1, message/3, message/4, properties/1, delivery/4]).
--export([publish/4, publish/6]).
+-export([publish/4, publish/6, publish/1,
+ message/3, message/4, properties/1, delivery/4]).
-export([build_content/2, from_content/1]).
%%----------------------------------------------------------------------------
@@ -35,6 +35,12 @@
-type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
-type(body_input() :: (binary() | [binary()])).
+-spec(publish/4 ::
+ (exchange_input(), rabbit_router:routing_key(), properties_input(),
+ body_input()) -> publish_result()).
+-spec(publish/6 ::
+ (exchange_input(), rabbit_router:routing_key(), boolean(), boolean(),
+ properties_input(), body_input()) -> publish_result()).
-spec(publish/1 ::
(rabbit_types:delivery()) -> publish_result()).
-spec(delivery/4 ::
@@ -49,12 +55,6 @@
rabbit_types:ok_or_error2(rabbit_types:message(), any())).
-spec(properties/1 ::
(properties_input()) -> rabbit_framing:amqp_property_record()).
--spec(publish/4 ::
- (exchange_input(), rabbit_router:routing_key(), properties_input(),
- body_input()) -> publish_result()).
--spec(publish/6 ::
- (exchange_input(), rabbit_router:routing_key(), boolean(), boolean(),
- properties_input(), body_input()) -> publish_result()).
-spec(build_content/2 :: (rabbit_framing:amqp_property_record(),
binary() | [binary()]) -> rabbit_types:content()).
-spec(from_content/1 :: (rabbit_types:content()) ->
@@ -64,13 +64,34 @@
%%----------------------------------------------------------------------------
+%% Convenience function, for avoiding round-trips in calls across the
+%% erlang distributed network.
+publish(Exchange, RoutingKeyBin, Properties, Body) ->
+ publish(Exchange, RoutingKeyBin, false, false, Properties, Body).
+
+%% Convenience function, for avoiding round-trips in calls across the
+%% erlang distributed network.
+publish(X = #exchange{name = XName}, RKey, Mandatory, Immediate, Props, Body) ->
+ publish(X, delivery(Mandatory, Immediate,
+ message(XName, RKey, properties(Props), Body),
+ undefined));
+publish(XName, RKey, Mandatory, Immediate, Props, Body) ->
+ publish(delivery(Mandatory, Immediate,
+ message(XName, RKey, properties(Props), Body),
+ undefined)).
+
publish(Delivery = #delivery{
- message = #basic_message{exchange_name = ExchangeName}}) ->
- case rabbit_exchange:lookup(ExchangeName) of
+ message = #basic_message{exchange_name = XName}}) ->
+ case rabbit_exchange:lookup(XName) of
{ok, X} -> publish(X, Delivery);
- Other -> Other
+ Err -> Err
end.
+publish(X, Delivery) ->
+ {RoutingRes, DeliveredQPids} =
+ rabbit_router:deliver(rabbit_exchange:route(X, Delivery), Delivery),
+ {ok, RoutingRes, DeliveredQPids}.
+
delivery(Mandatory, Immediate, Message, MsgSeqNo) ->
#delivery{mandatory = Mandatory, immediate = Immediate, sender = self(),
message = Message, msg_seq_no = MsgSeqNo}.
@@ -113,11 +134,10 @@ strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
headers = Headers0}})
end.
-message(ExchangeName, RoutingKey,
- #content{properties = Props} = DecodedContent) ->
+message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
try
{ok, #basic_message{
- exchange_name = ExchangeName,
+ exchange_name = XName,
content = strip_header(DecodedContent, ?DELETED_HEADER),
id = rabbit_guid:guid(),
is_persistent = is_message_persistent(DecodedContent),
@@ -127,10 +147,10 @@ message(ExchangeName, RoutingKey,
{error, _Reason} = Error -> Error
end.
-message(ExchangeName, RoutingKey, RawProperties, Body) ->
+message(XName, RoutingKey, RawProperties, Body) ->
Properties = properties(RawProperties),
Content = build_content(Properties, Body),
- {ok, Msg} = message(ExchangeName, RoutingKey, Content),
+ {ok, Msg} = message(XName, RoutingKey, Content),
Msg.
properties(P = #'P_basic'{}) ->
@@ -152,28 +172,6 @@ indexof([], _Element, _N) -> 0;
indexof([Element | _Rest], Element, N) -> N;
indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1).
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(Exchange, RoutingKeyBin, Properties, Body) ->
- publish(Exchange, RoutingKeyBin, false, false, Properties, Body).
-
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(X = #exchange{name = XName}, RKey, Mandatory, Immediate, Props, Body) ->
- publish(X, delivery(Mandatory, Immediate,
- message(XName, RKey, properties(Props), Body),
- undefined));
-publish(XName, RKey, Mandatory, Immediate, Props, Body) ->
- case rabbit_exchange:lookup(XName) of
- {ok, X} -> publish(X, RKey, Mandatory, Immediate, Props, Body);
- Err -> Err
- end.
-
-publish(X, Delivery) ->
- {RoutingRes, DeliveredQPids} =
- rabbit_router:deliver(rabbit_exchange:route(X, Delivery), Delivery),
- {ok, RoutingRes, DeliveredQPids}.
-
is_message_persistent(#content{properties = #'P_basic'{
delivery_mode = Mode}}) ->
case Mode of
diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl
index 205d5bba..e625a427 100644
--- a/src/rabbit_binding.erl
+++ b/src/rabbit_binding.erl
@@ -40,7 +40,7 @@
'source_and_destination_not_found')).
-type(bind_ok_or_error() :: 'ok' | bind_errors() |
rabbit_types:error('binding_not_found')).
--type(bind_res() :: bind_ok_or_error() | rabbit_misc:const(bind_ok_or_error())).
+-type(bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error())).
-type(inner_fun() ::
fun((rabbit_types:exchange(),
rabbit_types:exchange() | rabbit_types:amqqueue()) ->
@@ -108,21 +108,34 @@ recover(XNames, QNames) ->
SelectSet = fun (#resource{kind = exchange}) -> XNameSet;
(#resource{kind = queue}) -> QNameSet
end,
- [recover_semi_durable_route(R, SelectSet(Dst)) ||
+ {ok, Gatherer} = gatherer:start_link(),
+ [recover_semi_durable_route(Gatherer, R, SelectSet(Dst)) ||
R = #route{binding = #binding{destination = Dst}} <-
rabbit_misc:dirty_read_all(rabbit_semi_durable_route)],
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer),
ok.
-recover_semi_durable_route(R = #route{binding = B}, ToRecover) ->
+recover_semi_durable_route(Gatherer, R = #route{binding = B}, ToRecover) ->
#binding{source = Src, destination = Dst} = B,
- {ok, X} = rabbit_exchange:lookup(Src),
+ case sets:is_element(Dst, ToRecover) of
+ true -> {ok, X} = rabbit_exchange:lookup(Src),
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ recover_semi_durable_route_txn(R, X),
+ gatherer:finish(Gatherer)
+ end);
+ false -> ok
+ end.
+
+recover_semi_durable_route_txn(R = #route{binding = B}, X) ->
rabbit_misc:execute_mnesia_transaction(
fun () ->
- Rs = mnesia:match_object(rabbit_semi_durable_route, R, read),
- case Rs =/= [] andalso sets:is_element(Dst, ToRecover) of
- false -> no_recover;
- true -> ok = sync_transient_route(R, fun mnesia:write/3),
- rabbit_exchange:serial(X)
+ case mnesia:match_object(rabbit_semi_durable_route, R, read) of
+ [] -> no_recover;
+ _ -> ok = sync_transient_route(R, fun mnesia:write/3),
+ rabbit_exchange:serial(X)
end
end,
fun (no_recover, _) -> ok;
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl
index f398fcc5..d2f55277 100644
--- a/src/rabbit_channel.erl
+++ b/src/rabbit_channel.erl
@@ -23,17 +23,20 @@
-export([start_link/10, do/2, do/3, flush/1, shutdown/1]).
-export([send_command/2, deliver/4, flushed/2, confirm/2]).
-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]).
--export([refresh_config_all/0, emit_stats/1, ready_for_close/1]).
+-export([refresh_config_local/0, ready_for_close/1]).
+-export([force_event_refresh/0]).
-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
handle_info/2, handle_pre_hibernate/1, prioritise_call/3,
- prioritise_cast/2]).
+ prioritise_cast/2, prioritise_info/2, format_message_queue/2]).
+%% Internal
+-export([list_local/0]).
-record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid,
- limiter_pid, start_limiter_fun, tx_status, next_tag,
+ limiter, tx_status, next_tag,
unacked_message_q, uncommitted_message_q, uncommitted_ack_q,
- user, virtual_host, most_recently_declared_queue,
- consumer_mapping, blocking, consumer_monitors, queue_collector_pid,
+ user, virtual_host, most_recently_declared_queue, queue_monitors,
+ consumer_mapping, blocking, queue_consumers, queue_collector_pid,
stats_timer, confirm_enabled, publish_seqno, unconfirmed_mq,
unconfirmed_qm, confirmed, capabilities, trace_state}).
@@ -71,8 +74,7 @@
-spec(start_link/10 ::
(channel_number(), pid(), pid(), pid(), rabbit_types:protocol(),
rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
- pid(), fun ((non_neg_integer()) -> rabbit_types:ok(pid()))) ->
- rabbit_types:ok_pid_or_error()).
+ pid(), rabbit_limiter:token()) -> rabbit_types:ok_pid_or_error()).
-spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
-spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(),
rabbit_types:maybe(rabbit_types:content())) -> 'ok').
@@ -85,24 +87,25 @@
-spec(flushed/2 :: (pid(), pid()) -> 'ok').
-spec(confirm/2 ::(pid(), [non_neg_integer()]) -> 'ok').
-spec(list/0 :: () -> [pid()]).
+-spec(list_local/0 :: () -> [pid()]).
-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(info/1 :: (pid()) -> rabbit_types:infos()).
-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
-spec(info_all/0 :: () -> [rabbit_types:infos()]).
-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(refresh_config_all/0 :: () -> 'ok').
--spec(emit_stats/1 :: (pid()) -> 'ok').
+-spec(refresh_config_local/0 :: () -> 'ok').
-spec(ready_for_close/1 :: (pid()) -> 'ok').
+-spec(force_event_refresh/0 :: () -> 'ok').
-endif.
%%----------------------------------------------------------------------------
start_link(Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost,
- Capabilities, CollectorPid, StartLimiterFun) ->
+ Capabilities, CollectorPid, Limiter) ->
gen_server2:start_link(
?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, Protocol, User,
- VHost, Capabilities, CollectorPid, StartLimiterFun], []).
+ VHost, Capabilities, CollectorPid, Limiter], []).
do(Pid, Method) ->
do(Pid, Method, none).
@@ -129,6 +132,10 @@ confirm(Pid, MsgSeqNos) ->
gen_server2:cast(Pid, {confirm, MsgSeqNos, self()}).
list() ->
+ rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:running_clustered_nodes(),
+ rabbit_channel, list_local, []).
+
+list_local() ->
pg_local:get_members(rabbit_channels).
info_keys() -> ?INFO_KEYS.
@@ -148,21 +155,22 @@ info_all() ->
info_all(Items) ->
rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
-refresh_config_all() ->
+refresh_config_local() ->
rabbit_misc:upmap(
- fun (C) -> gen_server2:call(C, refresh_config) end, list()),
+ fun (C) -> gen_server2:call(C, refresh_config) end, list_local()),
ok.
-emit_stats(Pid) ->
- gen_server2:cast(Pid, emit_stats).
-
ready_for_close(Pid) ->
gen_server2:cast(Pid, ready_for_close).
+force_event_refresh() ->
+ [gen_server2:cast(C, force_event_refresh) || C <- list()],
+ ok.
+
%%---------------------------------------------------------------------------
init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost,
- Capabilities, CollectorPid, StartLimiterFun]) ->
+ Capabilities, CollectorPid, Limiter]) ->
process_flag(trap_exit, true),
ok = pg_local:join(rabbit_channels, self()),
StatsTimer = rabbit_event:init_stats_timer(),
@@ -172,8 +180,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost,
reader_pid = ReaderPid,
writer_pid = WriterPid,
conn_pid = ConnPid,
- limiter_pid = undefined,
- start_limiter_fun = StartLimiterFun,
+ limiter = Limiter,
tx_status = none,
next_tag = 1,
unacked_message_q = queue:new(),
@@ -182,9 +189,10 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost,
user = User,
virtual_host = VHost,
most_recently_declared_queue = <<>>,
+ queue_monitors = dict:new(),
consumer_mapping = dict:new(),
- blocking = dict:new(),
- consumer_monitors = dict:new(),
+ blocking = sets:new(),
+ queue_consumers = dict:new(),
queue_collector_pid = CollectorPid,
stats_timer = StatsTimer,
confirm_enabled = false,
@@ -196,7 +204,7 @@ init([Channel, ReaderPid, WriterPid, ConnPid, Protocol, User, VHost,
trace_state = rabbit_trace:init(VHost)},
rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)),
rabbit_event:if_enabled(StatsTimer,
- fun() -> internal_emit_stats(State) end),
+ fun() -> emit_stats(State) end),
{ok, State, hibernate,
{backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
@@ -209,11 +217,16 @@ prioritise_call(Msg, _From, _State) ->
prioritise_cast(Msg, _State) ->
case Msg of
- emit_stats -> 7;
{confirm, _MsgSeqNos, _QPid} -> 5;
_ -> 0
end.
+prioritise_info(Msg, _State) ->
+ case Msg of
+ emit_stats -> 7;
+ _ -> 0
+ end.
+
handle_call(flush, _From, State) ->
reply(ok, State);
@@ -263,7 +276,7 @@ handle_cast(terminate, State) ->
handle_cast({command, #'basic.consume_ok'{consumer_tag = ConsumerTag} = Msg},
State = #ch{writer_pid = WriterPid}) ->
ok = rabbit_writer:send_command(WriterPid, Msg),
- noreply(monitor_consumer(ConsumerTag, State));
+ noreply(consumer_monitor(ConsumerTag, State));
handle_cast({command, Msg}, State = #ch{writer_pid = WriterPid}) ->
ok = rabbit_writer:send_command(WriterPid, Msg),
@@ -287,19 +300,18 @@ handle_cast({deliver, ConsumerTag, AckRequired,
exchange = ExchangeName#resource.name,
routing_key = RoutingKey},
rabbit_writer:send_command_and_notify(WriterPid, QPid, self(), M, Content),
- maybe_incr_stats([{QPid, 1}], case AckRequired of
- true -> deliver;
- false -> deliver_no_ack
- end, State),
- maybe_incr_redeliver_stats(Redelivered, QPid, State),
+ State2 = maybe_incr_stats([{QPid, 1}], case AckRequired of
+ true -> deliver;
+ false -> deliver_no_ack
+ end, State1),
+ State3 = maybe_incr_redeliver_stats(Redelivered, QPid, State2),
rabbit_trace:tap_trace_out(Msg, TraceState),
- noreply(State1#ch{next_tag = DeliveryTag + 1});
+ noreply(State3#ch{next_tag = DeliveryTag + 1});
-handle_cast(emit_stats, State = #ch{stats_timer = StatsTimer}) ->
- internal_emit_stats(State),
- noreply([ensure_stats_timer],
- State#ch{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)});
+handle_cast(force_event_refresh, State) ->
+ rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)),
+ noreply(State);
handle_cast({confirm, MsgSeqNos, From}, State) ->
State1 = #ch{confirmed = C} = confirm(MsgSeqNos, From, State),
noreply([send_confirms], State1, case C of [] -> hibernate; _ -> 0 end).
@@ -307,26 +319,26 @@ handle_cast({confirm, MsgSeqNos, From}, State) ->
handle_info(timeout, State) ->
noreply(State);
-handle_info({'DOWN', MRef, process, QPid, Reason},
- State = #ch{consumer_monitors = ConsumerMonitors}) ->
- noreply(
- case dict:find(MRef, ConsumerMonitors) of
- error ->
- handle_publishing_queue_down(QPid, Reason, State);
- {ok, ConsumerTag} ->
- handle_consuming_queue_down(MRef, ConsumerTag, State)
- end);
+handle_info(emit_stats, State = #ch{stats_timer = StatsTimer}) ->
+ emit_stats(State),
+ noreply([ensure_stats_timer],
+ State#ch{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)});
+
+handle_info({'DOWN', _MRef, process, QPid, Reason}, State) ->
+ State1 = handle_publishing_queue_down(QPid, Reason, State),
+ State2 = queue_blocked(QPid, State1),
+ State3 = handle_consuming_queue_down(QPid, State2),
+ erase_queue_stats(QPid),
+ noreply(State3#ch{queue_monitors =
+ dict:erase(QPid, State3#ch.queue_monitors)});
handle_info({'EXIT', _Pid, Reason}, State) ->
{stop, Reason, State}.
handle_pre_hibernate(State = #ch{stats_timer = StatsTimer}) ->
ok = clear_permission_cache(),
- rabbit_event:if_enabled(StatsTimer,
- fun () ->
- internal_emit_stats(
- State, [{idle_since, now()}])
- end),
+ rabbit_event:if_enabled(
+ StatsTimer, fun () -> emit_stats(State, [{idle_since, now()}]) end),
StatsTimer1 = rabbit_event:stop_stats_timer(StatsTimer),
{hibernate, State#ch{stats_timer = StatsTimer1}}.
@@ -344,6 +356,8 @@ terminate(Reason, State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
%%---------------------------------------------------------------------------
reply(Reply, NewState) -> reply(Reply, [], NewState).
@@ -368,8 +382,7 @@ next_state(Mask, State) ->
ensure_stats_timer(State = #ch{stats_timer = StatsTimer}) ->
ChPid = self(),
State#ch{stats_timer = rabbit_event:ensure_stats_timer(
- StatsTimer,
- fun() -> emit_stats(ChPid) end)}.
+ StatsTimer, ChPid, emit_stats)}.
return_ok(State, true, _Msg) -> {noreply, State};
return_ok(State, false, Msg) -> {reply, Msg, State}.
@@ -502,17 +515,16 @@ check_name(_Kind, NameBin) ->
NameBin.
queue_blocked(QPid, State = #ch{blocking = Blocking}) ->
- case dict:find(QPid, Blocking) of
- error -> State;
- {ok, MRef} -> true = erlang:demonitor(MRef),
- Blocking1 = dict:erase(QPid, Blocking),
- ok = case dict:size(Blocking1) of
- 0 -> rabbit_writer:send_command(
- State#ch.writer_pid,
- #'channel.flow_ok'{active = false});
- _ -> ok
- end,
- State#ch{blocking = Blocking1}
+ case sets:is_element(QPid, Blocking) of
+ false -> State;
+ true -> Blocking1 = sets:del_element(QPid, Blocking),
+ ok = case sets:size(Blocking1) of
+ 0 -> rabbit_writer:send_command(
+ State#ch.writer_pid,
+ #'channel.flow_ok'{active = false});
+ _ -> ok
+ end,
+ demonitor_queue(QPid, State#ch{blocking = Blocking1})
end.
record_confirm(undefined, _, State) ->
@@ -531,38 +543,41 @@ confirm(MsgSeqNos, QPid, State) ->
{MXs, State1} = process_confirms(MsgSeqNos, QPid, false, State),
record_confirms(MXs, State1).
-process_confirms(MsgSeqNos, QPid, Nack, State = #ch{unconfirmed_mq = UMQ,
- unconfirmed_qm = UQM}) ->
- {MXs, UMQ1, UQM1} =
- lists:foldl(
- fun(MsgSeqNo, {_MXs, UMQ0, _UQM} = Acc) ->
- case gb_trees:lookup(MsgSeqNo, UMQ0) of
- {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ,
- Acc, Nack);
- none -> Acc
- end
- end, {[], UMQ, UQM}, MsgSeqNos),
- {MXs, State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}}.
-
-remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs}, {MXs, UMQ, UQM}, Nack) ->
- UQM1 = case gb_trees:lookup(QPid, UQM) of
- {value, MsgSeqNos} ->
- MsgSeqNos1 = gb_sets:delete(MsgSeqNo, MsgSeqNos),
- case gb_sets:is_empty(MsgSeqNos1) of
- true -> gb_trees:delete(QPid, UQM);
- false -> gb_trees:update(QPid, MsgSeqNos1, UQM)
- end;
- none ->
- UQM
- end,
+process_confirms(MsgSeqNos, QPid, Nack, State) ->
+ lists:foldl(
+ fun(MsgSeqNo, {_MXs, _State = #ch{unconfirmed_mq = UMQ0}} = Acc) ->
+ case gb_trees:lookup(MsgSeqNo, UMQ0) of
+ {value, XQ} -> remove_unconfirmed(MsgSeqNo, QPid, XQ,
+ Acc, Nack);
+ none -> Acc
+ end
+ end, {[], State}, MsgSeqNos).
+
+remove_unconfirmed(MsgSeqNo, QPid, {XName, Qs},
+ {MXs, State = #ch{unconfirmed_mq = UMQ,
+ unconfirmed_qm = UQM}},
+ Nack) ->
+ State1 = case gb_trees:lookup(QPid, UQM) of
+ {value, MsgSeqNos} ->
+ MsgSeqNos1 = gb_sets:delete(MsgSeqNo, MsgSeqNos),
+ case gb_sets:is_empty(MsgSeqNos1) of
+ true -> UQM1 = gb_trees:delete(QPid, UQM),
+ demonitor_queue(
+ QPid, State#ch{unconfirmed_qm = UQM1});
+ false -> UQM1 = gb_trees:update(QPid, MsgSeqNos1, UQM),
+ State#ch{unconfirmed_qm = UQM1}
+ end;
+ none ->
+ State
+ end,
Qs1 = gb_sets:del_element(QPid, Qs),
%% If QPid somehow died initiating a nack, clear the message from
%% internal data-structures. Also, cleanup empty entries.
case (Nack orelse gb_sets:is_empty(Qs1)) of
- true ->
- {[{MsgSeqNo, XName} | MXs], gb_trees:delete(MsgSeqNo, UMQ), UQM1};
- false ->
- {MXs, gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ), UQM1}
+ true -> UMQ1 = gb_trees:delete(MsgSeqNo, UMQ),
+ {[{MsgSeqNo, XName} | MXs], State1#ch{unconfirmed_mq = UMQ1}};
+ false -> UMQ1 = gb_trees:update(MsgSeqNo, {XName, Qs1}, UMQ),
+ {MXs, State1#ch{unconfirmed_mq = UMQ1}}
end.
handle_method(#'channel.open'{}, _, State = #ch{state = starting}) ->
@@ -679,11 +694,11 @@ handle_method(#'basic.get'{queue = QueueNameBin,
State1 = lock_message(not(NoAck),
ack_record(DeliveryTag, none, Msg),
State),
- maybe_incr_stats([{QPid, 1}], case NoAck of
- true -> get_no_ack;
- false -> get
- end, State),
- maybe_incr_redeliver_stats(Redelivered, QPid, State),
+ State2 = maybe_incr_stats([{QPid, 1}], case NoAck of
+ true -> get_no_ack;
+ false -> get
+ end, State1),
+ State3 = maybe_incr_redeliver_stats(Redelivered, QPid, State2),
rabbit_trace:tap_trace_out(Msg, TraceState),
ok = rabbit_writer:send_command(
WriterPid,
@@ -693,7 +708,7 @@ handle_method(#'basic.get'{queue = QueueNameBin,
routing_key = RoutingKey,
message_count = MessageCount},
Content),
- {noreply, State1#ch{next_tag = DeliveryTag + 1}};
+ {noreply, State3#ch{next_tag = DeliveryTag + 1}};
empty ->
{reply, #'basic.get_empty'{}, State}
end;
@@ -705,7 +720,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin,
exclusive = ExclusiveConsume,
nowait = NoWait},
_, State = #ch{conn_pid = ConnPid,
- limiter_pid = LimiterPid,
+ limiter = Limiter,
consumer_mapping = ConsumerMapping}) ->
case dict:find(ConsumerTag, ConsumerMapping) of
error ->
@@ -724,7 +739,7 @@ handle_method(#'basic.consume'{queue = QueueNameBin,
QueueName, ConnPid,
fun (Q) ->
{rabbit_amqqueue:basic_consume(
- Q, NoAck, self(), LimiterPid,
+ Q, NoAck, self(), Limiter,
ActualConsumerTag, ExclusiveConsume,
ok_msg(NoWait, #'basic.consume_ok'{
consumer_tag = ActualConsumerTag})),
@@ -732,12 +747,11 @@ handle_method(#'basic.consume'{queue = QueueNameBin,
end) of
{ok, Q} ->
State1 = State#ch{consumer_mapping =
- dict:store(ActualConsumerTag,
- {Q, undefined},
+ dict:store(ActualConsumerTag, Q,
ConsumerMapping)},
{noreply,
case NoWait of
- true -> monitor_consumer(ActualConsumerTag, State1);
+ true -> consumer_monitor(ActualConsumerTag, State1);
false -> State1
end};
{{error, exclusive_consume_unavailable}, _Q} ->
@@ -754,22 +768,26 @@ handle_method(#'basic.consume'{queue = QueueNameBin,
handle_method(#'basic.cancel'{consumer_tag = ConsumerTag,
nowait = NoWait},
_, State = #ch{consumer_mapping = ConsumerMapping,
- consumer_monitors = ConsumerMonitors}) ->
+ queue_consumers = QCons}) ->
OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
case dict:find(ConsumerTag, ConsumerMapping) of
error ->
%% Spec requires we ignore this situation.
return_ok(State, NoWait, OkMsg);
- {ok, {Q, MRef}} ->
- ConsumerMonitors1 =
- case MRef of
- undefined -> ConsumerMonitors;
- _ -> true = erlang:demonitor(MRef),
- dict:erase(MRef, ConsumerMonitors)
+ {ok, Q = #amqqueue{pid = QPid}} ->
+ ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping),
+ QCons1 =
+ case dict:find(QPid, QCons) of
+ error -> QCons;
+ {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
+ case gb_sets:is_empty(CTags1) of
+ true -> dict:erase(QPid, QCons);
+ false -> dict:store(QPid, CTags1, QCons)
+ end
end,
- NewState = State#ch{consumer_mapping = dict:erase(ConsumerTag,
- ConsumerMapping),
- consumer_monitors = ConsumerMonitors1},
+ NewState = demonitor_queue(
+ Q, State#ch{consumer_mapping = ConsumerMapping1,
+ queue_consumers = QCons1}),
%% In order to ensure that no more messages are sent to
%% the consumer after the cancel_ok has been sent, we get
%% the queue process to send the cancel_ok on our
@@ -798,22 +816,23 @@ handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
rabbit_misc:protocol_error(not_implemented,
"prefetch_size!=0 (~w)", [Size]);
-handle_method(#'basic.qos'{prefetch_count = PrefetchCount},
- _, State = #ch{limiter_pid = LimiterPid}) ->
- LimiterPid1 = case {LimiterPid, PrefetchCount} of
- {undefined, 0} -> undefined;
- {undefined, _} -> start_limiter(State);
- {_, _} -> LimiterPid
- end,
- LimiterPid2 = case rabbit_limiter:limit(LimiterPid1, PrefetchCount) of
- ok -> LimiterPid1;
- stopped -> unlimit_queues(State)
- end,
- {reply, #'basic.qos_ok'{}, State#ch{limiter_pid = LimiterPid2}};
+handle_method(#'basic.qos'{prefetch_count = PrefetchCount}, _,
+ State = #ch{limiter = Limiter}) ->
+ Limiter1 = case {rabbit_limiter:is_enabled(Limiter), PrefetchCount} of
+ {false, 0} -> Limiter;
+ {false, _} -> enable_limiter(State);
+ {_, _} -> Limiter
+ end,
+ Limiter3 = case rabbit_limiter:limit(Limiter1, PrefetchCount) of
+ ok -> Limiter1;
+ {disabled, Limiter2} -> ok = limit_queues(Limiter2, State),
+ Limiter2
+ end,
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter3}};
handle_method(#'basic.recover_async'{requeue = true},
_, State = #ch{unacked_message_q = UAMQ,
- limiter_pid = LimiterPid}) ->
+ limiter = Limiter}) ->
OkFun = fun () -> ok end,
ok = fold_per_queue(
fun (QPid, MsgIds, ok) ->
@@ -827,7 +846,7 @@ handle_method(#'basic.recover_async'{requeue = true},
QPid, lists:reverse(MsgIds), self())
end)
end, ok, UAMQ),
- ok = notify_limiter(LimiterPid, UAMQ),
+ ok = notify_limiter(Limiter, UAMQ),
%% No answer required - basic.recover is the newer, synchronous
%% variant of this method
{noreply, State#ch{unacked_message_q = queue:new()}};
@@ -1074,29 +1093,31 @@ handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
NoWait, #'confirm.select_ok'{});
handle_method(#'channel.flow'{active = true}, _,
- State = #ch{limiter_pid = LimiterPid}) ->
- LimiterPid1 = case rabbit_limiter:unblock(LimiterPid) of
- ok -> LimiterPid;
- stopped -> unlimit_queues(State)
- end,
- {reply, #'channel.flow_ok'{active = true},
- State#ch{limiter_pid = LimiterPid1}};
+ State = #ch{limiter = Limiter}) ->
+ Limiter2 = case rabbit_limiter:unblock(Limiter) of
+ ok -> Limiter;
+ {disabled, Limiter1} -> ok = limit_queues(Limiter1, State),
+ Limiter1
+ end,
+ {reply, #'channel.flow_ok'{active = true}, State#ch{limiter = Limiter2}};
handle_method(#'channel.flow'{active = false}, _,
- State = #ch{limiter_pid = LimiterPid,
- consumer_mapping = Consumers}) ->
- LimiterPid1 = case LimiterPid of
- undefined -> start_limiter(State);
- Other -> Other
- end,
- State1 = State#ch{limiter_pid = LimiterPid1},
- ok = rabbit_limiter:block(LimiterPid1),
+ State = #ch{consumer_mapping = Consumers,
+ limiter = Limiter}) ->
+ Limiter1 = case rabbit_limiter:is_enabled(Limiter) of
+ true -> Limiter;
+ false -> enable_limiter(State)
+ end,
+ State1 = State#ch{limiter = Limiter1},
+ ok = rabbit_limiter:block(Limiter1),
case consumer_queues(Consumers) of
[] -> {reply, #'channel.flow_ok'{active = false}, State1};
- QPids -> Queues = [{QPid, erlang:monitor(process, QPid)} ||
- QPid <- QPids],
+ QPids -> State2 = lists:foldl(fun monitor_queue/2,
+ State1#ch{blocking =
+ sets:from_list(QPids)},
+ QPids),
ok = rabbit_amqqueue:flush_all(QPids, self()),
- {noreply, State1#ch{blocking = dict:from_list(Queues)}}
+ {noreply, State2}
end;
handle_method(_MethodRecord, _Content, _State) ->
@@ -1105,23 +1126,51 @@ handle_method(_MethodRecord, _Content, _State) ->
%%----------------------------------------------------------------------------
-monitor_consumer(ConsumerTag, State = #ch{consumer_mapping = ConsumerMapping,
- consumer_monitors = ConsumerMonitors,
- capabilities = Capabilities}) ->
+consumer_monitor(ConsumerTag,
+ State = #ch{consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons,
+ capabilities = Capabilities}) ->
case rabbit_misc:table_lookup(
Capabilities, <<"consumer_cancel_notify">>) of
{bool, true} ->
- {#amqqueue{pid = QPid} = Q, undefined} =
- dict:fetch(ConsumerTag, ConsumerMapping),
- MRef = erlang:monitor(process, QPid),
- State#ch{consumer_mapping =
- dict:store(ConsumerTag, {Q, MRef}, ConsumerMapping),
- consumer_monitors =
- dict:store(MRef, ConsumerTag, ConsumerMonitors)};
+ #amqqueue{pid = QPid} = dict:fetch(ConsumerTag, ConsumerMapping),
+ QCons1 = dict:update(QPid,
+ fun (CTags) ->
+ gb_sets:insert(ConsumerTag, CTags)
+ end,
+ gb_sets:singleton(ConsumerTag),
+ QCons),
+ monitor_queue(QPid, State#ch{queue_consumers = QCons1});
_ ->
State
end.
+monitor_queue(QPid, State = #ch{queue_monitors = QMons}) ->
+ case (not dict:is_key(QPid, QMons) andalso
+ queue_monitor_needed(QPid, State)) of
+ true -> MRef = erlang:monitor(process, QPid),
+ State#ch{queue_monitors = dict:store(QPid, MRef, QMons)};
+ false -> State
+ end.
+
+demonitor_queue(QPid, State = #ch{queue_monitors = QMons}) ->
+ case (dict:is_key(QPid, QMons) andalso
+ not queue_monitor_needed(QPid, State)) of
+ true -> true = erlang:demonitor(dict:fetch(QPid, QMons)),
+ State#ch{queue_monitors = dict:erase(QPid, QMons)};
+ false -> State
+ end.
+
+queue_monitor_needed(QPid, #ch{stats_timer = StatsTimer,
+ queue_consumers = QCons,
+ blocking = Blocking,
+ unconfirmed_qm = UQM}) ->
+ StatsEnabled = rabbit_event:stats_level(StatsTimer) =:= fine,
+ ConsumerMonitored = dict:is_key(QPid, QCons),
+ QueueBlocked = sets:is_element(QPid, Blocking),
+ ConfirmMonitored = gb_trees:is_defined(QPid, UQM),
+ StatsEnabled or ConsumerMonitored or QueueBlocked or ConfirmMonitored.
+
handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) ->
MsgSeqNos = case gb_trees:lookup(QPid, UQM) of
{value, MsgSet} -> gb_sets:to_list(MsgSet);
@@ -1142,21 +1191,25 @@ handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed_qm = UQM}) ->
{true, fun send_nacks/2}
end,
{MXs, State2} = process_confirms(MsgSeqNos, QPid, Nack, State1),
- erase_queue_stats(QPid),
- State3 = SendFun(MXs, State2),
- queue_blocked(QPid, State3).
-
-handle_consuming_queue_down(MRef, ConsumerTag,
- State = #ch{consumer_mapping = ConsumerMapping,
- consumer_monitors = ConsumerMonitors,
- writer_pid = WriterPid}) ->
- ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping),
- ConsumerMonitors1 = dict:erase(MRef, ConsumerMonitors),
- Cancel = #'basic.cancel'{consumer_tag = ConsumerTag,
- nowait = true},
- ok = rabbit_writer:send_command(WriterPid, Cancel),
+ SendFun(MXs, State2).
+
+handle_consuming_queue_down(QPid,
+ State = #ch{consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons,
+ writer_pid = WriterPid}) ->
+ ConsumerTags = case dict:find(QPid, QCons) of
+ error -> gb_sets:new();
+ {ok, CTags} -> CTags
+ end,
+ ConsumerMapping1 =
+ gb_sets:fold(fun (CTag, CMap) ->
+ Cancel = #'basic.cancel'{consumer_tag = CTag,
+ nowait = true},
+ ok = rabbit_writer:send_command(WriterPid, Cancel),
+ dict:erase(CTag, CMap)
+ end, ConsumerMapping, ConsumerTags),
State#ch{consumer_mapping = ConsumerMapping1,
- consumer_monitors = ConsumerMonitors1}.
+ queue_consumers = dict:erase(QPid, QCons)}.
binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin,
RoutingKey, Arguments, ReturnMethod, NoWait,
@@ -1220,7 +1273,7 @@ reject(DeliveryTag, Requeue, Multiple, State = #ch{unacked_message_q = UAMQ}) ->
fun (QPid, MsgIds, ok) ->
rabbit_amqqueue:reject(QPid, MsgIds, Requeue, self())
end, ok, Acked),
- ok = notify_limiter(State#ch.limiter_pid, Acked),
+ ok = notify_limiter(State#ch.limiter, Acked),
{noreply, State#ch{unacked_message_q = Remaining}}.
ack_record(DeliveryTag, ConsumerTag,
@@ -1256,9 +1309,8 @@ ack(Acked, State) ->
ok = rabbit_amqqueue:ack(QPid, MsgIds, self()),
[{QPid, length(MsgIds)} | L]
end, [], Acked),
- maybe_incr_stats(QIncs, ack, State),
- ok = notify_limiter(State#ch.limiter_pid, Acked),
- State.
+ ok = notify_limiter(State#ch.limiter, Acked),
+ maybe_incr_stats(QIncs, ack, State).
new_tx(State) -> State#ch{uncommitted_message_q = queue:new(),
uncommitted_ack_q = queue:new()}.
@@ -1281,35 +1333,32 @@ fold_per_queue(F, Acc0, UAQ) ->
dict:fold(fun (QPid, MsgIds, Acc) -> F(QPid, MsgIds, Acc) end,
Acc0, D).
-start_limiter(State = #ch{unacked_message_q = UAMQ, start_limiter_fun = SLF}) ->
- {ok, LPid} = SLF(queue:len(UAMQ)),
- ok = limit_queues(LPid, State),
- LPid.
-
-unlimit_queues(State) ->
- ok = limit_queues(undefined, State),
- undefined.
+enable_limiter(State = #ch{unacked_message_q = UAMQ,
+ limiter = Limiter}) ->
+ Limiter1 = rabbit_limiter:enable(Limiter, queue:len(UAMQ)),
+ ok = limit_queues(Limiter1, State),
+ Limiter1.
-limit_queues(LPid, #ch{consumer_mapping = Consumers}) ->
- rabbit_amqqueue:limit_all(consumer_queues(Consumers), self(), LPid).
+limit_queues(Limiter, #ch{consumer_mapping = Consumers}) ->
+ rabbit_amqqueue:limit_all(consumer_queues(Consumers), self(), Limiter).
consumer_queues(Consumers) ->
lists:usort([QPid ||
- {_Key, {#amqqueue{pid = QPid}, _MRef}}
- <- dict:to_list(Consumers)]).
+ {_Key, #amqqueue{pid = QPid}} <- dict:to_list(Consumers)]).
%% tell the limiter about the number of acks that have been received
%% for messages delivered to subscribed consumers, but not acks for
%% messages sent in a response to a basic.get (identified by their
%% 'none' consumer tag)
-notify_limiter(undefined, _Acked) ->
- ok;
-notify_limiter(LimiterPid, Acked) ->
- case rabbit_misc:queue_fold(fun ({_, none, _}, Acc) -> Acc;
- ({_, _, _}, Acc) -> Acc + 1
- end, 0, Acked) of
- 0 -> ok;
- Count -> rabbit_limiter:ack(LimiterPid, Count)
+notify_limiter(Limiter, Acked) ->
+ case rabbit_limiter:is_enabled(Limiter) of
+ false -> ok;
+ true -> case rabbit_misc:queue_fold(fun ({_, none, _}, Acc) -> Acc;
+ ({_, _, _}, Acc) -> Acc + 1
+ end, 0, Acked) of
+ 0 -> ok;
+ Count -> rabbit_limiter:ack(Limiter, Count)
+ end
end.
deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
@@ -1321,38 +1370,37 @@ deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
XName, MsgSeqNo, Message, State),
maybe_incr_stats([{XName, 1} |
[{{QPid, XName}, 1} ||
- QPid <- DeliveredQPids]], publish, State1),
- State1.
+ QPid <- DeliveredQPids]], publish, State1).
process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) ->
ok = basic_return(Msg, State, no_route),
- maybe_incr_stats([{Msg#basic_message.exchange_name, 1}],
- return_unroutable, State),
- record_confirm(MsgSeqNo, XName, State);
+ record_confirm(MsgSeqNo, XName,
+ maybe_incr_stats([{Msg#basic_message.exchange_name, 1}],
+ return_unroutable, State));
process_routing_result(not_delivered, _, XName, MsgSeqNo, Msg, State) ->
ok = basic_return(Msg, State, no_consumers),
- maybe_incr_stats([{XName, 1}], return_not_delivered, State),
- record_confirm(MsgSeqNo, XName, State);
+ record_confirm(MsgSeqNo, XName,
+ maybe_incr_stats([{XName, 1}], return_not_delivered, State));
process_routing_result(routed, [], XName, MsgSeqNo, _, State) ->
record_confirm(MsgSeqNo, XName, State);
process_routing_result(routed, _, _, undefined, _, State) ->
State;
process_routing_result(routed, QPids, XName, MsgSeqNo, _, State) ->
- #ch{unconfirmed_mq = UMQ, unconfirmed_qm = UQM} = State,
+ #ch{unconfirmed_mq = UMQ} = State,
UMQ1 = gb_trees:insert(MsgSeqNo, {XName, gb_sets:from_list(QPids)}, UMQ),
SingletonSet = gb_sets:singleton(MsgSeqNo),
- UQM1 = lists:foldl(
- fun (QPid, UQM2) ->
- maybe_monitor(QPid),
- case gb_trees:lookup(QPid, UQM2) of
- {value, MsgSeqNos} ->
- MsgSeqNos1 = gb_sets:insert(MsgSeqNo, MsgSeqNos),
- gb_trees:update(QPid, MsgSeqNos1, UQM2);
- none ->
- gb_trees:insert(QPid, SingletonSet, UQM2)
- end
- end, UQM, QPids),
- State#ch{unconfirmed_mq = UMQ1, unconfirmed_qm = UQM1}.
+ lists:foldl(
+ fun (QPid, State0 = #ch{unconfirmed_qm = UQM}) ->
+ case gb_trees:lookup(QPid, UQM) of
+ {value, MsgSeqNos} ->
+ MsgSeqNos1 = gb_sets:insert(MsgSeqNo, MsgSeqNos),
+ UQM1 = gb_trees:update(QPid, MsgSeqNos1, UQM),
+ State0#ch{unconfirmed_qm = UQM1};
+ none ->
+ UQM1 = gb_trees:insert(QPid, SingletonSet, UQM),
+ monitor_queue(QPid, State0#ch{unconfirmed_qm = UQM1})
+ end
+ end, State#ch{unconfirmed_mq = UMQ1}, QPids).
lock_message(true, MsgStruct, State = #ch{unacked_message_q = UAMQ}) ->
State#ch{unacked_message_q = queue:in(MsgStruct, UAMQ)};
@@ -1372,11 +1420,13 @@ send_nacks(_, State) ->
maybe_complete_tx(State#ch{tx_status = failed}).
send_confirms(State = #ch{tx_status = none, confirmed = C}) ->
- C1 = lists:append(C),
- MsgSeqNos = [ begin maybe_incr_stats([{ExchangeName, 1}], confirm, State),
- MsgSeqNo
- end || {MsgSeqNo, ExchangeName} <- C1 ],
- send_confirms(MsgSeqNos, State #ch{confirmed = []});
+ {MsgSeqNos, State1} =
+ lists:foldl(fun ({MsgSeqNo, ExchangeName}, {MSNs, State0}) ->
+ {[MsgSeqNo | MSNs],
+ maybe_incr_stats([{ExchangeName, 1}], confirm,
+ State0)}
+ end, {[], State}, lists:append(C)),
+ send_confirms(MsgSeqNos, State1 #ch{confirmed = []});
send_confirms(State) ->
maybe_complete_tx(State).
@@ -1447,39 +1497,35 @@ i(messages_uncommitted, #ch{uncommitted_message_q = TMQ}) ->
queue:len(TMQ);
i(acks_uncommitted, #ch{uncommitted_ack_q = TAQ}) ->
queue:len(TAQ);
-i(prefetch_count, #ch{limiter_pid = LimiterPid}) ->
- rabbit_limiter:get_limit(LimiterPid);
-i(client_flow_blocked, #ch{limiter_pid = LimiterPid}) ->
- rabbit_limiter:is_blocked(LimiterPid);
+i(prefetch_count, #ch{limiter = Limiter}) ->
+ rabbit_limiter:get_limit(Limiter);
+i(client_flow_blocked, #ch{limiter = Limiter}) ->
+ rabbit_limiter:is_blocked(Limiter);
i(Item, _) ->
throw({bad_argument, Item}).
maybe_incr_redeliver_stats(true, QPid, State) ->
maybe_incr_stats([{QPid, 1}], redeliver, State);
-maybe_incr_redeliver_stats(_, _, _) ->
- ok.
+maybe_incr_redeliver_stats(_, _, State) ->
+ State.
-maybe_incr_stats(QXIncs, Measure, #ch{stats_timer = StatsTimer}) ->
+maybe_incr_stats(QXIncs, Measure, State = #ch{stats_timer = StatsTimer}) ->
case rabbit_event:stats_level(StatsTimer) of
- fine -> [incr_stats(QX, Inc, Measure) || {QX, Inc} <- QXIncs];
- _ -> ok
+ fine -> lists:foldl(fun ({QX, Inc}, State0) ->
+ incr_stats(QX, Inc, Measure, State0)
+ end, State, QXIncs);
+ _ -> State
end.
-incr_stats({QPid, _} = QX, Inc, Measure) ->
- maybe_monitor(QPid),
- update_measures(queue_exchange_stats, QX, Inc, Measure);
-incr_stats(QPid, Inc, Measure) when is_pid(QPid) ->
- maybe_monitor(QPid),
- update_measures(queue_stats, QPid, Inc, Measure);
-incr_stats(X, Inc, Measure) ->
- update_measures(exchange_stats, X, Inc, Measure).
-
-maybe_monitor(QPid) ->
- case get({monitoring, QPid}) of
- undefined -> erlang:monitor(process, QPid),
- put({monitoring, QPid}, true);
- _ -> ok
- end.
+incr_stats({QPid, _} = QX, Inc, Measure, State) ->
+ update_measures(queue_exchange_stats, QX, Inc, Measure),
+ monitor_queue(QPid, State);
+incr_stats(QPid, Inc, Measure, State) when is_pid(QPid) ->
+ update_measures(queue_stats, QPid, Inc, Measure),
+ monitor_queue(QPid, State);
+incr_stats(X, Inc, Measure, State) ->
+ update_measures(exchange_stats, X, Inc, Measure),
+ State.
update_measures(Type, QX, Inc, Measure) ->
Measures = case get({Type, QX}) of
@@ -1493,10 +1539,10 @@ update_measures(Type, QX, Inc, Measure) ->
put({Type, QX},
orddict:store(Measure, Cur + Inc, Measures)).
-internal_emit_stats(State) ->
- internal_emit_stats(State, []).
+emit_stats(State) ->
+ emit_stats(State, []).
-internal_emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) ->
+emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) ->
CoarseStats = infos(?STATISTICS_KEYS, State),
case rabbit_event:stats_level(StatsTimer) of
coarse ->
@@ -1515,7 +1561,6 @@ internal_emit_stats(State = #ch{stats_timer = StatsTimer}, Extra) ->
end.
erase_queue_stats(QPid) ->
- erase({monitoring, QPid}),
erase({queue_stats, QPid}),
[erase({queue_exchange_stats, QX}) ||
{{queue_exchange_stats, QX = {QPid0, _}}, _} <- get(), QPid =:= QPid0].
diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl
index 65ccca02..a19b6bfd 100644
--- a/src/rabbit_channel_sup.erl
+++ b/src/rabbit_channel_sup.erl
@@ -47,47 +47,44 @@
start_link({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, User, VHost,
Capabilities, Collector}) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, []),
- {ok, WriterPid} =
- supervisor2:start_child(
- SupPid,
- {writer, {rabbit_writer, start_link,
- [Sock, Channel, FrameMax, Protocol, ReaderPid]},
- intrinsic, ?MAX_WAIT, worker, [rabbit_writer]}),
+ {ok, SupPid} = supervisor2:start_link(?MODULE,
+ {tcp, Sock, Channel, FrameMax,
+ ReaderPid, Protocol}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ [WriterPid] = supervisor2:find_child(SupPid, writer),
{ok, ChannelPid} =
supervisor2:start_child(
SupPid,
{channel, {rabbit_channel, start_link,
[Channel, ReaderPid, WriterPid, ReaderPid, Protocol,
User, VHost, Capabilities, Collector,
- start_limiter_fun(SupPid)]},
+ rabbit_limiter:make_token(LimiterPid)]},
intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
{ok, AState} = rabbit_command_assembler:init(Protocol),
{ok, SupPid, {ChannelPid, AState}};
start_link({direct, Channel, ClientChannelPid, ConnPid, Protocol, User, VHost,
Capabilities, Collector}) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, SupPid} = supervisor2:start_link(?MODULE, direct),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
{ok, ChannelPid} =
supervisor2:start_child(
SupPid,
{channel, {rabbit_channel, start_link,
[Channel, ClientChannelPid, ClientChannelPid, ConnPid,
Protocol, User, VHost, Capabilities, Collector,
- start_limiter_fun(SupPid)]},
+ rabbit_limiter:make_token(LimiterPid)]},
intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
{ok, SupPid, {ChannelPid, none}}.
%%----------------------------------------------------------------------------
-init([]) ->
- {ok, {{one_for_all, 0, 1}, []}}.
-
-start_limiter_fun(SupPid) ->
- fun (UnackedCount) ->
- Me = self(),
- {ok, _Pid} =
- supervisor2:start_child(
- SupPid,
- {limiter, {rabbit_limiter, start_link, [Me, UnackedCount]},
- transient, ?MAX_WAIT, worker, [rabbit_limiter]})
- end.
+init(Type) ->
+ {ok, {{one_for_all, 0, 1}, child_specs(Type)}}.
+
+child_specs({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol}) ->
+ [{writer, {rabbit_writer, start_link,
+ [Sock, Channel, FrameMax, Protocol, ReaderPid]},
+ intrinsic, ?MAX_WAIT, worker, [rabbit_writer]} | child_specs(direct)];
+child_specs(direct) ->
+ [{limiter, {rabbit_limiter, start_link, []},
+ transient, ?MAX_WAIT, worker, [rabbit_limiter]}].
diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl
index 15e92542..dfb400e3 100644
--- a/src/rabbit_client_sup.erl
+++ b/src/rabbit_client_sup.erl
@@ -28,8 +28,7 @@
-ifdef(use_specs).
--spec(start_link/1 :: (mfa()) ->
- rabbit_types:ok_pid_or_error()).
+-spec(start_link/1 :: (mfa()) -> rabbit_types:ok_pid_or_error()).
-spec(start_link/2 :: ({'local', atom()}, mfa()) ->
rabbit_types:ok_pid_or_error()).
diff --git a/src/rabbit_command_assembler.erl b/src/rabbit_command_assembler.erl
index 07036ce8..a0953eab 100644
--- a/src/rabbit_command_assembler.erl
+++ b/src/rabbit_command_assembler.erl
@@ -22,8 +22,12 @@
%%----------------------------------------------------------------------------
+%%----------------------------------------------------------------------------
+
-ifdef(use_specs).
+-export_type([frame/0]).
+
-type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
?FRAME_TRACE | ?FRAME_HEARTBEAT).
diff --git a/src/rabbit_control.erl b/src/rabbit_control.erl
index 6eb1aaba..1163ae9d 100644
--- a/src/rabbit_control.erl
+++ b/src/rabbit_control.erl
@@ -20,7 +20,6 @@
-export([start/0, stop/0, action/5, diagnostics/1]).
-define(RPC_TIMEOUT, infinity).
--define(WAIT_FOR_VM_ATTEMPTS, 5).
-define(QUIET_OPT, "-q").
-define(NODE_OPT, "-n").
@@ -191,9 +190,9 @@ action(force_cluster, Node, ClusterNodeSs, _Opts, Inform) ->
[Node, ClusterNodes]),
rpc_call(Node, rabbit_mnesia, force_cluster, [ClusterNodes]);
-action(wait, Node, [], _Opts, Inform) ->
+action(wait, Node, [PidFile], _Opts, Inform) ->
Inform("Waiting for ~p", [Node]),
- wait_for_application(Node, ?WAIT_FOR_VM_ATTEMPTS);
+ wait_for_application(Node, PidFile, Inform);
action(status, Node, [], _Opts, Inform) ->
Inform("Status of node ~p", [Node]),
@@ -354,23 +353,63 @@ action(report, Node, _Args, _Opts, Inform) ->
%%----------------------------------------------------------------------------
-wait_for_application(Node, Attempts) ->
- case rpc_call(Node, application, which_applications, [infinity]) of
- {badrpc, _} = E -> case Attempts of
- 0 -> E;
- _ -> wait_for_application0(Node, Attempts - 1)
- end;
- Apps -> case proplists:is_defined(rabbit, Apps) of
- %% We've seen the node up; if it goes down
- %% die immediately.
- true -> ok;
- false -> wait_for_application0(Node, 0)
- end
+wait_for_application(Node, PidFile, Inform) ->
+ Pid = wait_and_read_pid_file(PidFile),
+ Inform("pid is ~s", [Pid]),
+ wait_for_application(Node, Pid).
+
+wait_for_application(Node, Pid) ->
+ case process_up(Pid) of
+ true -> case rabbit:is_running(Node) of
+ true -> ok;
+ false -> timer:sleep(1000),
+ wait_for_application(Node, Pid)
+ end;
+ false -> {error, process_not_running}
end.
-wait_for_application0(Node, Attempts) ->
- timer:sleep(1000),
- wait_for_application(Node, Attempts).
+wait_and_read_pid_file(PidFile) ->
+ case file:read_file(PidFile) of
+ {ok, Bin} -> string:strip(binary_to_list(Bin), right, $\n);
+ {error, enoent} -> timer:sleep(500),
+ wait_and_read_pid_file(PidFile);
+ {error, _} = E -> exit({error, {could_not_read_pid, E}})
+ end.
+
+% Test using some OS clunkiness since we shouldn't trust
+% rpc:call(os, getpid, []) at this point
+process_up(Pid) ->
+ with_os([{unix, fun () ->
+ system("ps -p " ++ Pid
+ ++ " >/dev/null 2>&1") =:= 0
+ end},
+ {win32, fun () ->
+ Res = os:cmd("tasklist /nh /fi \"pid eq " ++
+ Pid ++ "\" 2>&1"),
+ case re:run(Res, "erl\\.exe", [{capture, none}]) of
+ match -> true;
+ _ -> false
+ end
+ end}]).
+
+with_os(Handlers) ->
+ {OsFamily, _} = os:type(),
+ case proplists:get_value(OsFamily, Handlers) of
+ undefined -> throw({unsupported_os, OsFamily});
+ Handler -> Handler()
+ end.
+
+% Like system(3)
+system(Cmd) ->
+ ShCmd = "sh -c '" ++ escape_quotes(Cmd) ++ "'",
+ Port = erlang:open_port({spawn, ShCmd}, [exit_status,nouse_stdio]),
+ receive {Port, {exit_status, Status}} -> Status end.
+
+% Escape the quotes in a shell command so that it can be used in "sh -c 'cmd'"
+escape_quotes(Cmd) ->
+ lists:flatten(lists:map(fun ($') -> "'\\''"; (Ch) -> Ch end, Cmd)).
+
+%%----------------------------------------------------------------------------
default_if_empty(List, Default) when is_list(List) ->
if List == [] -> Default;
diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl
index 7ff534ee..6f9a4650 100644
--- a/src/rabbit_direct.erl
+++ b/src/rabbit_direct.erl
@@ -16,7 +16,10 @@
-module(rabbit_direct).
--export([boot/0, connect/4, start_channel/8, disconnect/1]).
+-export([boot/0, force_event_refresh/0, list/0, connect/5,
+ start_channel/8, disconnect/2]).
+%% Internal
+-export([list_local/0]).
-include("rabbit.hrl").
@@ -25,8 +28,12 @@
-ifdef(use_specs).
-spec(boot/0 :: () -> 'ok').
--spec(connect/4 :: (rabbit_types:username(), rabbit_types:vhost(),
- rabbit_types:protocol(), rabbit_event:event_props()) ->
+-spec(force_event_refresh/0 :: () -> 'ok').
+-spec(list/0 :: () -> [pid()]).
+-spec(list_local/0 :: () -> [pid()]).
+-spec(connect/5 :: (rabbit_types:username(), rabbit_types:vhost(),
+ rabbit_types:protocol(), pid(),
+ rabbit_event:event_props()) ->
{'ok', {rabbit_types:user(),
rabbit_framing:amqp_table()}}).
-spec(start_channel/8 ::
@@ -34,7 +41,7 @@
rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
pid()) -> {'ok', pid()}).
--spec(disconnect/1 :: (rabbit_event:event_props()) -> 'ok').
+-spec(disconnect/2 :: (pid(), rabbit_event:event_props()) -> 'ok').
-endif.
@@ -51,15 +58,27 @@ boot() ->
transient, infinity, supervisor, [rabbit_client_sup]}),
ok.
+force_event_refresh() ->
+ [Pid ! force_event_refresh || Pid<- list()],
+ ok.
+
+list_local() ->
+ pg_local:get_members(rabbit_direct).
+
+list() ->
+ rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:running_clustered_nodes(),
+ rabbit_direct, list_local, []).
+
%%----------------------------------------------------------------------------
-connect(Username, VHost, Protocol, Infos) ->
- case lists:keymember(rabbit, 1, application:which_applications()) of
+connect(Username, VHost, Protocol, Pid, Infos) ->
+ case rabbit:is_running() of
true ->
case rabbit_access_control:check_user_login(Username, []) of
{ok, User} ->
try rabbit_access_control:check_vhost_access(User, VHost) of
- ok -> rabbit_event:notify(connection_created, Infos),
+ ok -> ok = pg_local:join(rabbit_direct, Pid),
+ rabbit_event:notify(connection_created, Infos),
{ok, {User,
rabbit_reader:server_properties(Protocol)}}
catch
@@ -82,5 +101,6 @@ start_channel(Number, ClientChannelPid, ConnPid, Protocol, User, VHost,
Capabilities, Collector}]),
{ok, ChannelPid}.
-disconnect(Infos) ->
+disconnect(Pid, Infos) ->
+ pg_local:leave(rabbit_direct, Pid),
rabbit_event:notify(connection_closed, Infos).
diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl
index 93aad9e3..6e29ace7 100644
--- a/src/rabbit_error_logger.erl
+++ b/src/rabbit_error_logger.erl
@@ -27,6 +27,16 @@
-export([init/1, terminate/2, code_change/3, handle_call/2, handle_event/2,
handle_info/2]).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(boot/0 :: () -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
boot() ->
{ok, DefaultVHost} = application:get_env(default_vhost),
ok = error_logger:add_report_handler(?MODULE, [DefaultVHost]).
diff --git a/src/rabbit_error_logger_file_h.erl b/src/rabbit_error_logger_file_h.erl
index 7e9ebc4f..7b6e07c1 100644
--- a/src/rabbit_error_logger_file_h.erl
+++ b/src/rabbit_error_logger_file_h.erl
@@ -26,11 +26,16 @@
%% with the result of closing the old handler when swapping handlers.
%% The first init/1 additionally allows for simple log rotation
%% when the suffix is not the empty string.
+%% The original init/2 also opened the file in 'write' mode, thus
+%% overwriting old logs. To remedy this, init/2 from
+%% lib/stdlib/src/error_logger_file_h.erl from R14B3 was copied as
+%% init_file/2 and changed so that it opens the file in 'append' mode.
%% Used only when swapping handlers in log rotation
init({{File, Suffix}, []}) ->
- case rabbit_misc:append_file(File, Suffix) of
- ok -> ok;
+ case rabbit_file:append_file(File, Suffix) of
+ ok -> file:delete(File),
+ ok;
{error, Error} ->
rabbit_log:error("Failed to append contents of "
"log file '~s' to '~s':~n~p~n",
@@ -45,12 +50,31 @@ init({{File, _}, error}) ->
%% log rotation
init({File, []}) ->
init(File);
-init({File, _Type} = FileInfo) ->
- rabbit_misc:ensure_parent_dirs_exist(File),
- error_logger_file_h:init(FileInfo);
+%% Used only when taking over from the tty handler
+init({{File, []}, _}) ->
+ init(File);
+init({File, {error_logger, Buf}}) ->
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(File, {error_logger, Buf});
init(File) ->
- rabbit_misc:ensure_parent_dirs_exist(File),
- error_logger_file_h:init(File).
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(File, []).
+
+init_file(File, {error_logger, Buf}) ->
+ case init_file(File, error_logger) of
+ {ok, {Fd, File, PrevHandler}} ->
+ [handle_event(Event, {Fd, File, PrevHandler}) ||
+ {_, Event} <- lists:reverse(Buf)],
+ {ok, {Fd, File, PrevHandler}};
+ Error ->
+ Error
+ end;
+init_file(File, PrevHandler) ->
+ process_flag(trap_exit, true),
+ case file:open(File, [append]) of
+ {ok,Fd} -> {ok, {Fd, File, PrevHandler}};
+ Error -> Error
+ end.
handle_event(Event, State) ->
error_logger_file_h:handle_event(Event, State).
diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl
index 468f9293..bb765566 100644
--- a/src/rabbit_event.erl
+++ b/src/rabbit_event.erl
@@ -19,7 +19,7 @@
-include("rabbit.hrl").
-export([start_link/0]).
--export([init_stats_timer/0, ensure_stats_timer/2, stop_stats_timer/1]).
+-export([init_stats_timer/0, ensure_stats_timer/3, stop_stats_timer/1]).
-export([reset_stats_timer/1]).
-export([stats_level/1, if_enabled/2]).
-export([notify/2, notify_if/3]).
@@ -57,7 +57,7 @@
-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(init_stats_timer/0 :: () -> state()).
--spec(ensure_stats_timer/2 :: (state(), timer_fun()) -> state()).
+-spec(ensure_stats_timer/3 :: (state(), pid(), term()) -> state()).
-spec(stop_stats_timer/1 :: (state()) -> state()).
-spec(reset_stats_timer/1 :: (state()) -> state()).
-spec(stats_level/1 :: (state()) -> level()).
@@ -80,7 +80,7 @@ start_link() ->
%% if_enabled(internal_emit_stats) - so we immediately send something
%%
%% On wakeup:
-%% ensure_stats_timer(Timer, emit_stats)
+%% ensure_stats_timer(Timer, Pid, emit_stats)
%% (Note we can't emit stats immediately, the timer may have fired 1ms ago.)
%%
%% emit_stats:
@@ -99,13 +99,13 @@ init_stats_timer() ->
{ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
#state{level = StatsLevel, interval = Interval, timer = undefined}.
-ensure_stats_timer(State = #state{level = none}, _Fun) ->
+ensure_stats_timer(State = #state{level = none}, _Pid, _Msg) ->
State;
ensure_stats_timer(State = #state{interval = Interval,
- timer = undefined}, Fun) ->
- {ok, TRef} = timer:apply_after(Interval, erlang, apply, [Fun, []]),
+ timer = undefined}, Pid, Msg) ->
+ TRef = erlang:send_after(Interval, Pid, Msg),
State#state{timer = TRef};
-ensure_stats_timer(State, _Fun) ->
+ensure_stats_timer(State, _Pid, _Msg) ->
State.
stop_stats_timer(State = #state{level = none}) ->
@@ -113,7 +113,7 @@ stop_stats_timer(State = #state{level = none}) ->
stop_stats_timer(State = #state{timer = undefined}) ->
State;
stop_stats_timer(State = #state{timer = TRef}) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State#state{timer = undefined}.
reset_stats_timer(State) ->
diff --git a/src/rabbit_file.erl b/src/rabbit_file.erl
new file mode 100644
index 00000000..5cb8e7b6
--- /dev/null
+++ b/src/rabbit_file.erl
@@ -0,0 +1,282 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is VMware, Inc.
+%% Copyright (c) 2011 VMware, Inc. All rights reserved.
+%%
+
+-module(rabbit_file).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([is_file/1, is_dir/1, file_size/1, ensure_dir/1, wildcard/2, list_dir/1]).
+-export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
+-export([append_file/2, ensure_parent_dirs_exist/1]).
+-export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
+-export([lock_file/1]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(ok_or_error() :: rabbit_types:ok_or_error(any())).
+
+-spec(is_file/1 :: ((file:filename())) -> boolean()).
+-spec(is_dir/1 :: ((file:filename())) -> boolean()).
+-spec(file_size/1 :: ((file:filename())) -> non_neg_integer()).
+-spec(ensure_dir/1 :: ((file:filename())) -> ok_or_error()).
+-spec(wildcard/2 :: (string(), file:filename()) -> [file:filename()]).
+-spec(list_dir/1 :: (file:filename()) -> rabbit_types:ok_or_error2(
+ [file:filename()], any())).
+-spec(read_term_file/1 ::
+ (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())).
+-spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()).
+-spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()).
+-spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()).
+-spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()).
+-spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok').
+-spec(rename/2 ::
+ (file:filename(), file:filename()) -> ok_or_error()).
+-spec(delete/1 :: ([file:filename()]) -> ok_or_error()).
+-spec(recursive_delete/1 ::
+ ([file:filename()])
+ -> rabbit_types:ok_or_error({file:filename(), any()})).
+-spec(recursive_copy/2 ::
+ (file:filename(), file:filename())
+ -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
+-spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+is_file(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{type=regular}} -> true;
+ {ok, #file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
+
+is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
+
+is_dir_internal({ok, #file_info{type=directory}}) -> true;
+is_dir_internal(_) -> false.
+
+file_size(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{size=Size}} -> Size;
+ _ -> 0
+ end.
+
+ensure_dir(File) -> with_fhc_handle(fun () -> ensure_dir_internal(File) end).
+
+ensure_dir_internal("/") ->
+ ok;
+ensure_dir_internal(File) ->
+ Dir = filename:dirname(File),
+ case is_dir_no_handle(Dir) of
+ true -> ok;
+ false -> ensure_dir_internal(Dir),
+ prim_file:make_dir(Dir)
+ end.
+
+wildcard(Pattern, Dir) ->
+ {ok, Files} = list_dir(Dir),
+ {ok, RE} = re:compile(Pattern, [anchored]),
+ [File || File <- Files, match =:= re:run(File, RE, [{capture, none}])].
+
+list_dir(Dir) -> with_fhc_handle(fun () -> prim_file:list_dir(Dir) end).
+
+read_file_info(File) ->
+ with_fhc_handle(fun () -> prim_file:read_file_info(File) end).
+
+with_fhc_handle(Fun) ->
+ ok = file_handle_cache:obtain(),
+ try Fun()
+ after ok = file_handle_cache:release()
+ end.
+
+read_term_file(File) ->
+ try
+ {ok, Data} = with_fhc_handle(fun () -> prim_file:read_file(File) end),
+ {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
+ TokenGroups = group_tokens(Tokens),
+ {ok, [begin
+ {ok, Term} = erl_parse:parse_term(Tokens1),
+ Term
+ end || Tokens1 <- TokenGroups]}
+ catch
+ error:{badmatch, Error} -> Error
+ end.
+
+group_tokens(Ts) -> [lists:reverse(G) || G <- group_tokens([], Ts)].
+
+group_tokens([], []) -> [];
+group_tokens(Cur, []) -> [Cur];
+group_tokens(Cur, [T = {dot, _} | Ts]) -> [[T | Cur] | group_tokens([], Ts)];
+group_tokens(Cur, [T | Ts]) -> group_tokens([T | Cur], Ts).
+
+write_term_file(File, Terms) ->
+ write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) ||
+ Term <- Terms])).
+
+write_file(Path, Data) -> write_file(Path, Data, []).
+
+%% write_file/3 and make_binary/1 are both based on corresponding
+%% functions in the kernel/file.erl module of the Erlang R14B02
+%% release, which is licensed under the EPL. That implementation of
+%% write_file/3 does not do an fsync prior to closing the file, hence
+%% the existence of this version. APIs are otherwise identical.
+write_file(Path, Data, Modes) ->
+ Modes1 = [binary, write | (Modes -- [binary, write])],
+ case make_binary(Data) of
+ Bin when is_binary(Bin) ->
+ with_fhc_handle(
+ fun () -> case prim_file:open(Path, Modes1) of
+ {ok, Hdl} -> try prim_file:write(Hdl, Bin) of
+ ok -> prim_file:sync(Hdl);
+ {error, _} = E -> E
+ after
+ prim_file:close(Hdl)
+ end;
+ {error, _} = E -> E
+ end
+ end);
+ {error, _} = E -> E
+ end.
+
+make_binary(Bin) when is_binary(Bin) ->
+ Bin;
+make_binary(List) ->
+ try
+ iolist_to_binary(List)
+ catch error:Reason ->
+ {error, Reason}
+ end.
+
+
+append_file(File, Suffix) ->
+ case read_file_info(File) of
+ {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
+ {error, enoent} -> append_file(File, 0, Suffix);
+ Error -> Error
+ end.
+
+append_file(_, _, "") ->
+ ok;
+append_file(File, 0, Suffix) ->
+ with_fhc_handle(fun () ->
+ case prim_file:open([File, Suffix], [append]) of
+ {ok, Fd} -> prim_file:close(Fd);
+ Error -> Error
+ end
+ end);
+append_file(File, _, Suffix) ->
+ case with_fhc_handle(fun () -> prim_file:read_file(File) end) of
+ {ok, Data} -> write_file([File, Suffix], Data, [append]);
+ Error -> Error
+ end.
+
+ensure_parent_dirs_exist(Filename) ->
+ case ensure_dir(Filename) of
+ ok -> ok;
+ {error, Reason} ->
+ throw({error, {cannot_create_parent_dirs, Filename, Reason}})
+ end.
+
+rename(Old, New) -> with_fhc_handle(fun () -> prim_file:rename(Old, New) end).
+
+delete(File) -> with_fhc_handle(fun () -> prim_file:delete(File) end).
+
+recursive_delete(Files) ->
+ with_fhc_handle(
+ fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path);
+ (_Path, {error, _Err} = Error) -> Error
+ end, ok, Files)
+ end).
+
+recursive_delete1(Path) ->
+ case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of
+ false -> case prim_file:delete(Path) of
+ ok -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ true -> case prim_file:list_dir(Path) of
+ {ok, FileNames} ->
+ case lists:foldl(
+ fun (FileName, ok) ->
+ recursive_delete1(
+ filename:join(Path, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames) of
+ ok ->
+ case prim_file:del_dir(Path) of
+ ok -> ok;
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ {error, _Err} = Error ->
+ Error
+ end;
+ {error, Err} ->
+ {error, {Path, Err}}
+ end
+ end.
+
+is_symlink_no_handle(File) ->
+ case prim_file:read_link(File) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+recursive_copy(Src, Dest) ->
+ %% Note that this uses the 'file' module and, hence, shouldn't be
+ %% run on many processes at once.
+ case is_dir(Src) of
+ false -> case file:copy(Src, Dest) of
+ {ok, _Bytes} -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Src, Dest, Err}}
+ end;
+ true -> case file:list_dir(Src) of
+ {ok, FileNames} ->
+ case file:make_dir(Dest) of
+ ok ->
+ lists:foldl(
+ fun (FileName, ok) ->
+ recursive_copy(
+ filename:join(Src, FileName),
+ filename:join(Dest, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames);
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end;
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end
+ end.
+
+%% TODO: When we stop supporting Erlang prior to R14, this should be
+%% replaced with file:open [write, exclusive]
+lock_file(Path) ->
+ case is_file(Path) of
+ true -> {error, eexist};
+ false -> with_fhc_handle(
+ fun () -> {ok, Lock} = prim_file:open(Path, [write]),
+ ok = prim_file:close(Lock)
+ end)
+ end.
diff --git a/src/rabbit_guid.erl b/src/rabbit_guid.erl
index 234bc55b..cf3fea1a 100644
--- a/src/rabbit_guid.erl
+++ b/src/rabbit_guid.erl
@@ -52,13 +52,13 @@ start_link() ->
update_disk_serial() ->
Filename = filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME),
- Serial = case rabbit_misc:read_term_file(Filename) of
+ Serial = case rabbit_file:read_term_file(Filename) of
{ok, [Num]} -> Num;
{error, enoent} -> 0;
{error, Reason} ->
throw({error, {cannot_read_serial_file, Filename, Reason}})
end,
- case rabbit_misc:write_term_file(Filename, [Serial + 1]) of
+ case rabbit_file:write_term_file(Filename, [Serial + 1]) of
ok -> ok;
{error, Reason1} ->
throw({error, {cannot_write_serial_file, Filename, Reason1}})
diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl
index 8f9ab032..24468a01 100644
--- a/src/rabbit_limiter.erl
+++ b/src/rabbit_limiter.erl
@@ -20,27 +20,36 @@
-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
handle_info/2, prioritise_call/3]).
--export([start_link/2]).
+-export([start_link/0, make_token/0, make_token/1, is_enabled/1, enable/2,
+ disable/1]).
-export([limit/2, can_send/3, ack/2, register/2, unregister/2]).
-export([get_limit/1, block/1, unblock/1, is_blocked/1]).
%%----------------------------------------------------------------------------
--ifdef(use_specs).
+-record(token, {pid, enabled}).
--type(maybe_pid() :: pid() | 'undefined').
+-ifdef(use_specs).
--spec(start_link/2 :: (pid(), non_neg_integer()) ->
- rabbit_types:ok_pid_or_error()).
--spec(limit/2 :: (maybe_pid(), non_neg_integer()) -> 'ok' | 'stopped').
--spec(can_send/3 :: (maybe_pid(), pid(), boolean()) -> boolean()).
--spec(ack/2 :: (maybe_pid(), non_neg_integer()) -> 'ok').
--spec(register/2 :: (maybe_pid(), pid()) -> 'ok').
--spec(unregister/2 :: (maybe_pid(), pid()) -> 'ok').
--spec(get_limit/1 :: (maybe_pid()) -> non_neg_integer()).
--spec(block/1 :: (maybe_pid()) -> 'ok').
--spec(unblock/1 :: (maybe_pid()) -> 'ok' | 'stopped').
--spec(is_blocked/1 :: (maybe_pid()) -> boolean()).
+-export_type([token/0]).
+
+-opaque(token() :: #token{}).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(make_token/0 :: () -> token()).
+-spec(make_token/1 :: ('undefined' | pid()) -> token()).
+-spec(is_enabled/1 :: (token()) -> boolean()).
+-spec(enable/2 :: (token(), non_neg_integer()) -> token()).
+-spec(disable/1 :: (token()) -> token()).
+-spec(limit/2 :: (token(), non_neg_integer()) -> 'ok' | {'disabled', token()}).
+-spec(can_send/3 :: (token(), pid(), boolean()) -> boolean()).
+-spec(ack/2 :: (token(), non_neg_integer()) -> 'ok').
+-spec(register/2 :: (token(), pid()) -> 'ok').
+-spec(unregister/2 :: (token(), pid()) -> 'ok').
+-spec(get_limit/1 :: (token()) -> non_neg_integer()).
+-spec(block/1 :: (token()) -> 'ok').
+-spec(unblock/1 :: (token()) -> 'ok' | {'disabled', token()}).
+-spec(is_blocked/1 :: (token()) -> boolean()).
-endif.
@@ -59,63 +68,63 @@
%% API
%%----------------------------------------------------------------------------
-start_link(ChPid, UnackedMsgCount) ->
- gen_server2:start_link(?MODULE, [ChPid, UnackedMsgCount], []).
+start_link() -> gen_server2:start_link(?MODULE, [], []).
+
+make_token() -> make_token(undefined).
+make_token(Pid) -> #token{pid = Pid, enabled = false}.
+
+is_enabled(#token{enabled = Enabled}) -> Enabled.
+
+enable(#token{pid = Pid} = Token, Volume) ->
+ gen_server2:call(Pid, {enable, Token, self(), Volume}, infinity).
-limit(undefined, 0) ->
- ok;
-limit(LimiterPid, PrefetchCount) ->
- gen_server2:call(LimiterPid, {limit, PrefetchCount}, infinity).
+disable(#token{pid = Pid} = Token) ->
+ gen_server2:call(Pid, {disable, Token}, infinity).
+
+limit(Limiter, PrefetchCount) ->
+ maybe_call(Limiter, {limit, PrefetchCount, Limiter}, ok).
%% Ask the limiter whether the queue can deliver a message without
-%% breaching a limit
-can_send(undefined, _QPid, _AckRequired) ->
- true;
-can_send(LimiterPid, QPid, AckRequired) ->
+%% breaching a limit. Note that we don't use maybe_call here in order
+%% to avoid always going through with_exit_handler/2, even when the
+%% limiter is disabled.
+can_send(#token{pid = Pid, enabled = true}, QPid, AckRequired) ->
rabbit_misc:with_exit_handler(
fun () -> true end,
- fun () -> gen_server2:call(LimiterPid, {can_send, QPid, AckRequired},
- infinity) end).
+ fun () ->
+ gen_server2:call(Pid, {can_send, QPid, AckRequired}, infinity)
+ end);
+can_send(_, _, _) ->
+ true.
%% Let the limiter know that the channel has received some acks from a
%% consumer
-ack(undefined, _Count) -> ok;
-ack(LimiterPid, Count) -> gen_server2:cast(LimiterPid, {ack, Count}).
+ack(Limiter, Count) -> maybe_cast(Limiter, {ack, Count}).
-register(undefined, _QPid) -> ok;
-register(LimiterPid, QPid) -> gen_server2:cast(LimiterPid, {register, QPid}).
+register(Limiter, QPid) -> maybe_cast(Limiter, {register, QPid}).
-unregister(undefined, _QPid) -> ok;
-unregister(LimiterPid, QPid) -> gen_server2:cast(LimiterPid, {unregister, QPid}).
+unregister(Limiter, QPid) -> maybe_cast(Limiter, {unregister, QPid}).
-get_limit(undefined) ->
- 0;
-get_limit(Pid) ->
+get_limit(Limiter) ->
rabbit_misc:with_exit_handler(
fun () -> 0 end,
- fun () -> gen_server2:call(Pid, get_limit, infinity) end).
+ fun () -> maybe_call(Limiter, get_limit, 0) end).
-block(undefined) ->
- ok;
-block(LimiterPid) ->
- gen_server2:call(LimiterPid, block, infinity).
+block(Limiter) ->
+ maybe_call(Limiter, block, ok).
-unblock(undefined) ->
- ok;
-unblock(LimiterPid) ->
- gen_server2:call(LimiterPid, unblock, infinity).
+unblock(Limiter) ->
+ maybe_call(Limiter, {unblock, Limiter}, ok).
-is_blocked(undefined) ->
- false;
-is_blocked(LimiterPid) ->
- gen_server2:call(LimiterPid, is_blocked, infinity).
+is_blocked(Limiter) ->
+ maybe_call(Limiter, is_blocked, false).
%%----------------------------------------------------------------------------
%% gen_server callbacks
%%----------------------------------------------------------------------------
-init([ChPid, UnackedMsgCount]) ->
- {ok, #lim{ch_pid = ChPid, volume = UnackedMsgCount}}.
+init([]) ->
+ {ok, #lim{}}.
prioritise_call(get_limit, _From, _State) -> 9;
prioritise_call(_Msg, _From, _State) -> 0.
@@ -135,23 +144,33 @@ handle_call({can_send, QPid, AckRequired}, _From,
handle_call(get_limit, _From, State = #lim{prefetch_count = PrefetchCount}) ->
{reply, PrefetchCount, State};
-handle_call({limit, PrefetchCount}, _From, State) ->
+handle_call({limit, PrefetchCount, Token}, _From, State) ->
case maybe_notify(State, State#lim{prefetch_count = PrefetchCount}) of
- {cont, State1} -> {reply, ok, State1};
- {stop, State1} -> {stop, normal, stopped, State1}
+ {cont, State1} ->
+ {reply, ok, State1};
+ {stop, State1} ->
+ {reply, {disabled, Token#token{enabled = false}}, State1}
end;
handle_call(block, _From, State) ->
{reply, ok, State#lim{blocked = true}};
-handle_call(unblock, _From, State) ->
+handle_call({unblock, Token}, _From, State) ->
case maybe_notify(State, State#lim{blocked = false}) of
- {cont, State1} -> {reply, ok, State1};
- {stop, State1} -> {stop, normal, stopped, State1}
+ {cont, State1} ->
+ {reply, ok, State1};
+ {stop, State1} ->
+ {reply, {disabled, Token#token{enabled = false}}, State1}
end;
handle_call(is_blocked, _From, State) ->
- {reply, blocked(State), State}.
+ {reply, blocked(State), State};
+
+handle_call({enable, Token, Channel, Volume}, _From, State) ->
+ {reply, Token#token{enabled = true},
+ State#lim{ch_pid = Channel, volume = Volume}};
+handle_call({disable, Token}, _From, State) ->
+ {reply, Token#token{enabled = false}, State}.
handle_cast({ack, Count}, State = #lim{volume = Volume}) ->
NewVolume = if Volume == 0 -> 0;
@@ -190,6 +209,16 @@ maybe_notify(OldState, NewState) ->
false -> {cont, NewState}
end.
+maybe_call(#token{pid = Pid, enabled = true}, Call, _Default) ->
+ gen_server2:call(Pid, Call, infinity);
+maybe_call(_, _Call, Default) ->
+ Default.
+
+maybe_cast(#token{pid = Pid, enabled = true}, Cast) ->
+ gen_server2:cast(Pid, Cast);
+maybe_cast(_, _Call) ->
+ ok.
+
limit_reached(#lim{prefetch_count = Limit, volume = Volume}) ->
Limit =/= 0 andalso Volume >= Limit.
@@ -227,7 +256,8 @@ notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
%% thus ensuring that each queue has an equal chance of
%% being notified first.
{L1, L2} = lists:split(random:uniform(L), QList),
- [ok = rabbit_amqqueue:unblock(Q, ChPid) || Q <- L2 ++ L1],
+ [[ok = rabbit_amqqueue:unblock(Q, ChPid) || Q <- L3]
+ || L3 <- [L2, L1]],
ok
end,
State#lim{queues = NewQueues}.
diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl
index 8207d6bc..558e0957 100644
--- a/src/rabbit_log.erl
+++ b/src/rabbit_log.erl
@@ -42,6 +42,8 @@
-spec(error/1 :: (string()) -> 'ok').
-spec(error/2 :: (string(), [any()]) -> 'ok').
+-spec(message/4 :: (_,_,_,_) -> 'ok').
+
-endif.
%%----------------------------------------------------------------------------
diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl
index f6664a27..8ed2bede 100644
--- a/src/rabbit_mirror_queue_coordinator.erl
+++ b/src/rabbit_mirror_queue_coordinator.erl
@@ -16,7 +16,7 @@
-module(rabbit_mirror_queue_coordinator).
--export([start_link/3, get_gm/1, ensure_monitoring/2]).
+-export([start_link/4, get_gm/1, ensure_monitoring/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
@@ -32,15 +32,17 @@
-record(state, { q,
gm,
monitors,
- death_fun
+ death_fun,
+ length_fun
}).
-define(ONE_SECOND, 1000).
-ifdef(use_specs).
--spec(start_link/3 :: (rabbit_types:amqqueue(), pid() | 'undefined',
- rabbit_mirror_queue_master:death_fun()) ->
+-spec(start_link/4 :: (rabbit_types:amqqueue(), pid() | 'undefined',
+ rabbit_mirror_queue_master:death_fun(),
+ rabbit_mirror_queue_master:length_fun()) ->
rabbit_types:ok_pid_or_error()).
-spec(get_gm/1 :: (pid()) -> pid()).
-spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok').
@@ -53,7 +55,7 @@
%%
%% A queue with mirrors consists of the following:
%%
-%% #amqqueue{ pid, mirror_pids }
+%% #amqqueue{ pid, slave_pids }
%% | |
%% +----------+ +-------+--------------+-----------...etc...
%% | | |
@@ -138,9 +140,28 @@
%% state of the master. The detection of the sync-status of a slave is
%% done entirely based on length: if the slave and the master both
%% agree on the length of the queue after the fetch of the head of the
-%% queue, then the queues must be in sync. The only other possibility
-%% is that the slave's queue is shorter, and thus the fetch should be
-%% ignored.
+%% queue (or a 'set_length' results in a slave having to drop some
+%% messages from the head of its queue), then the queues must be in
+%% sync. The only other possibility is that the slave's queue is
+%% shorter, and thus the fetch should be ignored. In case slaves are
+%% joined to an empty queue which only goes on to receive publishes,
+%% they start by asking the master to broadcast its length. This is
+%% enough for slaves to always be able to work out when their head
+%% does not differ from the master (and is much simpler and cheaper
+%% than getting the master to hang on to the guid of the msg at the
+%% head of its queue). When a slave is promoted to a master, it
+%% unilaterally broadcasts its length, in order to solve the problem
+%% of length requests from new slaves being unanswered by a dead
+%% master.
+%%
+%% Obviously, due to the async nature of communication across gm, the
+%% slaves can fall behind. This does not matter from a sync pov: if
+%% they fall behind and the master dies then a) no publishes are lost
+%% because all publishes go to all mirrors anyway; b) the worst that
+%% happens is that acks get lost and so messages come back to
+%% life. This is no worse than normal given you never get confirmation
+%% that an ack has been received (not quite true with QoS-prefetch,
+%% but close enough for jazz).
%%
%% Because acktags are issued by the bq independently, and because
%% there is no requirement for the master and all slaves to use the
@@ -279,8 +300,8 @@
%%
%%----------------------------------------------------------------------------
-start_link(Queue, GM, DeathFun) ->
- gen_server2:start_link(?MODULE, [Queue, GM, DeathFun], []).
+start_link(Queue, GM, DeathFun, LengthFun) ->
+ gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, LengthFun], []).
get_gm(CPid) ->
gen_server2:call(CPid, get_gm, infinity).
@@ -292,7 +313,7 @@ ensure_monitoring(CPid, Pids) ->
%% gen_server
%% ---------------------------------------------------------------------------
-init([#amqqueue { name = QueueName } = Q, GM, DeathFun]) ->
+init([#amqqueue { name = QueueName } = Q, GM, DeathFun, LengthFun]) ->
GM1 = case GM of
undefined ->
{ok, GM2} = gm:start_link(QueueName, ?MODULE, [self()]),
@@ -306,10 +327,11 @@ init([#amqqueue { name = QueueName } = Q, GM, DeathFun]) ->
end,
{ok, _TRef} =
timer:apply_interval(?ONE_SECOND, gm, broadcast, [GM1, heartbeat]),
- {ok, #state { q = Q,
- gm = GM1,
- monitors = dict:new(),
- death_fun = DeathFun },
+ {ok, #state { q = Q,
+ gm = GM1,
+ monitors = dict:new(),
+ death_fun = DeathFun,
+ length_fun = LengthFun },
hibernate,
{backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
@@ -317,18 +339,21 @@ handle_call(get_gm, _From, State = #state { gm = GM }) ->
reply(GM, State).
handle_cast({gm_deaths, Deaths},
- State = #state { q = #amqqueue { name = QueueName } }) ->
- rabbit_log:info("Mirrored-queue (~s): Master ~s saw deaths of mirrors ~s~n",
- [rabbit_misc:rs(QueueName),
- rabbit_misc:pid_to_string(self()),
- [[rabbit_misc:pid_to_string(Pid), $ ] || Pid <- Deaths]]),
+ State = #state { q = #amqqueue { name = QueueName, pid = MPid } })
+ when node(MPid) =:= node() ->
case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of
- {ok, Pid} when node(Pid) =:= node() ->
+ {ok, MPid, DeadPids} ->
+ rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
+ DeadPids),
noreply(State);
{error, not_found} ->
{stop, normal, State}
end;
+handle_cast(request_length, State = #state { length_fun = LengthFun }) ->
+ ok = LengthFun(),
+ noreply(State);
+
handle_cast({ensure_monitoring, Pids},
State = #state { monitors = Monitors }) ->
Monitors1 =
@@ -343,13 +368,12 @@ handle_cast({ensure_monitoring, Pids},
handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
State = #state { monitors = Monitors,
- death_fun = Fun }) ->
- noreply(
- case dict:is_key(Pid, Monitors) of
- false -> State;
- true -> ok = Fun(Pid),
- State #state { monitors = dict:erase(Pid, Monitors) }
- end);
+ death_fun = DeathFun }) ->
+ noreply(case dict:is_key(Pid, Monitors) of
+ false -> State;
+ true -> ok = DeathFun(Pid),
+ State #state { monitors = dict:erase(Pid, Monitors) }
+ end);
handle_info(Msg, State) ->
{stop, {unexpected_info, Msg}, State}.
@@ -379,6 +403,8 @@ members_changed([CPid], _Births, Deaths) ->
handle_msg([_CPid], _From, heartbeat) ->
ok;
+handle_msg([CPid], _From, request_length = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
ok = gen_server2:cast(CPid, Msg);
handle_msg([_CPid], _From, _Msg) ->
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl
index 532911f2..5fc6341f 100644
--- a/src/rabbit_mirror_queue_master.erl
+++ b/src/rabbit_mirror_queue_master.erl
@@ -25,7 +25,7 @@
-export([start/1, stop/0]).
--export([promote_backing_queue_state/6, sender_death_fun/0]).
+-export([promote_backing_queue_state/6, sender_death_fun/0, length_fun/0]).
-behaviour(rabbit_backing_queue).
@@ -44,9 +44,10 @@
-ifdef(use_specs).
--export_type([death_fun/0]).
+-export_type([death_fun/0, length_fun/0]).
-type(death_fun() :: fun ((pid()) -> 'ok')).
+-type(length_fun() :: fun (() -> 'ok')).
-type(master_state() :: #state { gm :: pid(),
coordinator :: pid(),
backing_queue :: atom(),
@@ -58,9 +59,14 @@
known_senders :: set()
}).
+-type(ack() :: non_neg_integer()).
+-type(state() :: master_state()).
+-include("rabbit_backing_queue_spec.hrl").
+
-spec(promote_backing_queue_state/6 ::
(pid(), atom(), any(), pid(), dict(), [pid()]) -> master_state()).
-spec(sender_death_fun/0 :: () -> death_fun()).
+-spec(length_fun/0 :: () -> length_fun()).
-endif.
@@ -83,7 +89,7 @@ stop() ->
init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover,
AsyncCallback) ->
{ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
- Q, undefined, sender_death_fun()),
+ Q, undefined, sender_death_fun(), length_fun()),
GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
MNodes1 =
(case MNodes of
@@ -94,6 +100,7 @@ init(#amqqueue { name = QName, mirror_nodes = MNodes } = Q, Recover,
[rabbit_mirror_queue_misc:add_mirror(QName, Node) || Node <- MNodes1],
{ok, BQ} = application:get_env(backing_queue_module),
BQS = BQ:init(Q, Recover, AsyncCallback),
+ ok = gm:broadcast(GM, {length, BQ:len(BQS)}),
#state { gm = GM,
coordinator = CPid,
backing_queue = BQ,
@@ -349,11 +356,13 @@ discard(Msg = #basic_message { id = MsgId }, ChPid,
%% ---------------------------------------------------------------------------
promote_backing_queue_state(CPid, BQ, BQS, GM, SeenStatus, KS) ->
+ Len = BQ:len(BQS),
+ ok = gm:broadcast(GM, {length, Len}),
#state { gm = GM,
coordinator = CPid,
backing_queue = BQ,
backing_queue_state = BQS,
- set_delivered = BQ:len(BQS),
+ set_delivered = Len,
seen_status = SeenStatus,
confirmed = [],
ack_msg_id = dict:new(),
@@ -371,9 +380,18 @@ sender_death_fun() ->
end)
end.
-%% ---------------------------------------------------------------------------
-%% Helpers
-%% ---------------------------------------------------------------------------
+length_fun() ->
+ Self = self(),
+ fun () ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {length, BQ:len(BQS)}),
+ State
+ end)
+ end.
maybe_store_acktag(undefined, _MsgId, AM) ->
AM;
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl
index 6a9f733e..baebc52b 100644
--- a/src/rabbit_mirror_queue_misc.erl
+++ b/src/rabbit_mirror_queue_misc.erl
@@ -17,10 +17,31 @@
-module(rabbit_mirror_queue_misc).
-export([remove_from_queue/2, on_node_up/0,
- drop_mirror/2, drop_mirror/3, add_mirror/2, add_mirror/3]).
+ drop_mirror/2, drop_mirror/3, add_mirror/2, add_mirror/3,
+ report_deaths/4]).
-include("rabbit.hrl").
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(remove_from_queue/2 ::
+ (rabbit_amqqueue:name(), [pid()])
+ -> {'ok', pid(), [pid()]} | {'error', 'not_found'}).
+-spec(on_node_up/0 :: () -> 'ok').
+-spec(drop_mirror/2 ::
+ (rabbit_amqqueue:name(), node()) -> rabbit_types:ok_or_error(any())).
+-spec(add_mirror/2 ::
+ (rabbit_amqqueue:name(), node()) -> rabbit_types:ok_or_error(any())).
+-spec(add_mirror/3 ::
+ (rabbit_types:vhost(), binary(), atom())
+ -> rabbit_types:ok_or_error(any())).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
%% If the dead pids include the queue pid (i.e. the master has died)
%% then only remove that if we are about to be promoted. Otherwise we
%% can have the situation where a slave updates the mnesia record for
@@ -28,6 +49,7 @@
%% become the new master, which is bad because it could then mean the
%% slave (now master) receives messages it's not ready for (for
%% example, new consumers).
+%% Returns {ok, NewMPid, DeadPids}
remove_from_queue(QueueName, DeadPids) ->
DeadNodes = [node(DeadPid) || DeadPid <- DeadPids],
rabbit_misc:execute_mnesia_transaction(
@@ -38,27 +60,27 @@ remove_from_queue(QueueName, DeadPids) ->
[] -> {error, not_found};
[Q = #amqqueue { pid = QPid,
slave_pids = SPids }] ->
- [QPid1 | SPids1] =
+ [QPid1 | SPids1] = Alive =
[Pid || Pid <- [QPid | SPids],
not lists:member(node(Pid), DeadNodes)],
case {{QPid, SPids}, {QPid1, SPids1}} of
{Same, Same} ->
- ok;
+ {ok, QPid1, []};
_ when QPid =:= QPid1 orelse node(QPid1) =:= node() ->
%% Either master hasn't changed, so
%% we're ok to update mnesia; or we have
%% become the master.
Q1 = Q #amqqueue { pid = QPid1,
slave_pids = SPids1 },
- ok = rabbit_amqqueue:store_queue(Q1);
+ ok = rabbit_amqqueue:store_queue(Q1),
+ {ok, QPid1, [QPid | SPids] -- Alive};
_ ->
%% Master has changed, and we're not it,
%% so leave alone to allow the promoted
%% slave to find it and make its
%% promotion atomic.
- ok
- end,
- {ok, QPid1}
+ {ok, QPid1, []}
+ end
end
end).
@@ -133,3 +155,17 @@ if_mirrored_queue(Queue, Fun) ->
_ -> Fun(Q)
end
end).
+
+report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
+ ok;
+report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
+ rabbit_event:notify(queue_mirror_deaths, [{name, QueueName},
+ {pids, DeadPids}]),
+ rabbit_log:info("Mirrored-queue (~s): ~s ~s saw deaths of mirrors ~s~n",
+ [rabbit_misc:rs(QueueName),
+ case IsMaster of
+ true -> "Master";
+ false -> "Slave"
+ end,
+ rabbit_misc:pid_to_string(MirrorPid),
+ [[rabbit_misc:pid_to_string(P), $ ] || P <- DeadPids]]).
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl
index b38a8967..43962491 100644
--- a/src/rabbit_mirror_queue_slave.erl
+++ b/src/rabbit_mirror_queue_slave.erl
@@ -33,11 +33,11 @@
%% All instructions from the GM group must be processed in the order
%% in which they're received.
--export([start_link/1, set_maximum_since_use/2]).
+-export([start_link/1, set_maximum_since_use/2, info/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3, handle_pre_hibernate/1, prioritise_call/3,
- prioritise_cast/2]).
+ prioritise_cast/2, prioritise_info/2]).
-export([joined/2, members_changed/3, handle_msg/3]).
@@ -45,8 +45,28 @@
-behaviour(gm).
-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
-include("gm_specs.hrl").
+-ifdef(use_specs).
+%% Shut dialyzer up
+-spec(promote_me/2 :: (_, _) -> no_return()).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+
+-define(CREATION_EVENT_KEYS,
+ [pid,
+ name,
+ master_pid,
+ is_synchronised
+ ]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS).
+
-define(SYNC_INTERVAL, 25). %% milliseconds
-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
-define(DEATH_TIMEOUT, 20000). %% 20 seconds
@@ -64,7 +84,9 @@
ack_num,
msg_id_status,
- known_senders
+ known_senders,
+
+ synchronised
}).
start_link(Q) ->
@@ -73,6 +95,9 @@ start_link(Q) ->
set_maximum_since_use(QPid, Age) ->
gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+info(QPid) ->
+ gen_server2:call(QPid, info, infinity).
+
init([#amqqueue { name = QueueName } = Q]) ->
process_flag(trap_exit, true), %% amqqueue_process traps exits too.
{ok, GM} = gm:start_link(QueueName, ?MODULE, [self()]),
@@ -89,33 +114,38 @@ init([#amqqueue { name = QueueName } = Q]) ->
%% ASSERTION
[] = [Pid || Pid <- [QPid | MPids], node(Pid) =:= Node],
MPids1 = MPids ++ [Self],
- mnesia:write(rabbit_queue,
- Q1 #amqqueue { slave_pids = MPids1 },
- write),
+ ok = rabbit_amqqueue:store_queue(
+ Q1 #amqqueue { slave_pids = MPids1 }),
{ok, QPid}
end),
erlang:monitor(process, MPid),
ok = file_handle_cache:register_callback(
- rabbit_amqqueue, set_maximum_since_use, [self()]),
+ rabbit_amqqueue, set_maximum_since_use, [Self]),
ok = rabbit_memory_monitor:register(
- self(), {rabbit_amqqueue, set_ram_duration_target, [self()]}),
+ Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
{ok, BQ} = application:get_env(backing_queue_module),
BQS = bq_init(BQ, Q, false),
- {ok, #state { q = Q,
- gm = GM,
- master_pid = MPid,
- backing_queue = BQ,
- backing_queue_state = BQS,
- rate_timer_ref = undefined,
- sync_timer_ref = undefined,
-
- sender_queues = dict:new(),
- msg_id_ack = dict:new(),
- ack_num = 0,
-
- msg_id_status = dict:new(),
- known_senders = dict:new()
- }, hibernate,
+ State = #state { q = Q,
+ gm = GM,
+ master_pid = MPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = undefined,
+ sync_timer_ref = undefined,
+
+ sender_queues = dict:new(),
+ msg_id_ack = dict:new(),
+ ack_num = 0,
+
+ msg_id_status = dict:new(),
+ known_senders = dict:new(),
+
+ synchronised = false
+ },
+ rabbit_event:notify(queue_slave_created,
+ infos(?CREATION_EVENT_KEYS, State)),
+ ok = gm:broadcast(GM, request_length),
+ {ok, State, hibernate,
{backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
handle_call({deliver_immediately, Delivery = #delivery {}}, From, State) ->
@@ -145,29 +175,32 @@ handle_call({gm_deaths, Deaths}, From,
State = #state { q = #amqqueue { name = QueueName },
gm = GM,
master_pid = MPid }) ->
- rabbit_log:info("Mirrored-queue (~s): Slave ~s saw deaths of mirrors ~s~n",
- [rabbit_misc:rs(QueueName),
- rabbit_misc:pid_to_string(self()),
- [[rabbit_misc:pid_to_string(Pid), $ ] || Pid <- Deaths]]),
%% The GM has told us about deaths, which means we're not going to
%% receive any more messages from GM
case rabbit_mirror_queue_misc:remove_from_queue(QueueName, Deaths) of
- {ok, Pid} when node(Pid) =:= node(MPid) ->
- %% master hasn't changed
- reply(ok, State);
- {ok, Pid} when node(Pid) =:= node() ->
- %% we've become master
- promote_me(From, State);
- {ok, Pid} ->
- %% master has changed to not us.
- gen_server2:reply(From, ok),
- erlang:monitor(process, Pid),
- ok = gm:broadcast(GM, heartbeat),
- noreply(State #state { master_pid = Pid });
{error, not_found} ->
gen_server2:reply(From, ok),
- {stop, normal, State}
- end.
+ {stop, normal, State};
+ {ok, Pid, DeadPids} ->
+ rabbit_mirror_queue_misc:report_deaths(self(), false, QueueName,
+ DeadPids),
+ if node(Pid) =:= node(MPid) ->
+ %% master hasn't changed
+ reply(ok, State);
+ node(Pid) =:= node() ->
+ %% we've become master
+ promote_me(From, State);
+ true ->
+ %% master has changed to not us.
+ gen_server2:reply(From, ok),
+ erlang:monitor(process, Pid),
+ ok = gm:broadcast(GM, heartbeat),
+ noreply(State #state { master_pid = Pid })
+ end
+ end;
+
+handle_call(info, _From, State) ->
+ reply(infos(?INFO_KEYS, State), State).
handle_cast({run_backing_queue, Mod, Fun}, State) ->
noreply(run_backing_queue(Mod, Fun, State));
@@ -187,9 +220,9 @@ handle_cast({set_ram_duration_target, Duration},
State = #state { backing_queue = BQ,
backing_queue_state = BQS }) ->
BQS1 = BQ:set_ram_duration_target(Duration, BQS),
- noreply(State #state { backing_queue_state = BQS1 });
+ noreply(State #state { backing_queue_state = BQS1 }).
-handle_cast(update_ram_duration,
+handle_info(update_ram_duration,
State = #state { backing_queue = BQ,
backing_queue_state = BQS }) ->
{RamDuration, BQS1} = BQ:ram_duration(BQS),
@@ -199,9 +232,9 @@ handle_cast(update_ram_duration,
noreply(State #state { rate_timer_ref = just_measured,
backing_queue_state = BQS2 });
-handle_cast(sync_timeout, State) ->
+handle_info(sync_timeout, State) ->
noreply(backing_queue_timeout(
- State #state { sync_timer_ref = undefined })).
+ State #state { sync_timer_ref = undefined }));
handle_info(timeout, State) ->
noreply(backing_queue_timeout(State));
@@ -260,22 +293,28 @@ handle_pre_hibernate(State = #state { backing_queue = BQ,
prioritise_call(Msg, _From, _State) ->
case Msg of
+ info -> 9;
{gm_deaths, _Deaths} -> 5;
_ -> 0
end.
prioritise_cast(Msg, _State) ->
case Msg of
- update_ram_duration -> 8;
{set_ram_duration_target, _Duration} -> 8;
{set_maximum_since_use, _Age} -> 8;
{run_backing_queue, _Mod, _Fun} -> 6;
- sync_timeout -> 6;
{gm, _Msg} -> 5;
{post_commit, _Txn, _AckTags} -> 4;
_ -> 0
end.
+prioritise_info(Msg, _State) ->
+ case Msg of
+ update_ram_duration -> 8;
+ sync_timeout -> 6;
+ _ -> 0
+ end.
+
%% ---------------------------------------------------------------------------
%% GM
%% ---------------------------------------------------------------------------
@@ -291,6 +330,9 @@ members_changed([SPid], _Births, Deaths) ->
handle_msg([_SPid], _From, heartbeat) ->
ok;
+handle_msg([_SPid], _From, request_length) ->
+ %% This is only of value to the master
+ ok;
handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) ->
%% This is only of value to the master
ok;
@@ -315,6 +357,14 @@ inform_deaths(SPid, Deaths) ->
%% Others
%% ---------------------------------------------------------------------------
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(pid, _State) -> self();
+i(name, #state { q = #amqqueue { name = Name } }) -> Name;
+i(master_pid, #state { master_pid = MPid }) -> MPid;
+i(is_synchronised, #state { synchronised = Synchronised }) -> Synchronised;
+i(Item, _State) -> throw({bad_argument, Item}).
+
bq_init(BQ, Q, Recover) ->
Self = self(),
BQ:init(Q, Recover,
@@ -380,7 +430,7 @@ gb_trees_cons(Key, Value, Tree) ->
handle_process_result({ok, State}) -> noreply(State);
handle_process_result({stop, State}) -> {stop, normal, State}.
-promote_me(From, #state { q = Q,
+promote_me(From, #state { q = Q = #amqqueue { name = QName },
gm = GM,
backing_queue = BQ,
backing_queue_state = BQS,
@@ -389,12 +439,14 @@ promote_me(From, #state { q = Q,
msg_id_ack = MA,
msg_id_status = MS,
known_senders = KS }) ->
+ rabbit_event:notify(queue_slave_promoted, [{pid, self()},
+ {name, QName}]),
rabbit_log:info("Mirrored-queue (~s): Promoting slave ~s to master~n",
- [rabbit_misc:rs(Q #amqqueue.name),
- rabbit_misc:pid_to_string(self())]),
+ [rabbit_misc:rs(QName), rabbit_misc:pid_to_string(self())]),
Q1 = Q #amqqueue { pid = self() },
{ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
- Q1, GM, rabbit_mirror_queue_master:sender_death_fun()),
+ Q1, GM, rabbit_mirror_queue_master:sender_death_fun(),
+ rabbit_mirror_queue_master:length_fun()),
true = unlink(GM),
gen_server2:reply(From, {promote, CPid}),
ok = gm:confirmed_broadcast(GM, heartbeat),
@@ -516,8 +568,7 @@ backing_queue_timeout(State = #state { backing_queue = BQ }) ->
run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State).
ensure_sync_timer(State = #state { sync_timer_ref = undefined }) ->
- {ok, TRef} = timer:apply_after(
- ?SYNC_INTERVAL, rabbit_amqqueue, sync_timeout, [self()]),
+ TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync_timeout),
State #state { sync_timer_ref = TRef };
ensure_sync_timer(State) ->
State.
@@ -525,14 +576,12 @@ ensure_sync_timer(State) ->
stop_sync_timer(State = #state { sync_timer_ref = undefined }) ->
State;
stop_sync_timer(State = #state { sync_timer_ref = TRef }) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State #state { sync_timer_ref = undefined }.
ensure_rate_timer(State = #state { rate_timer_ref = undefined }) ->
- {ok, TRef} = timer:apply_after(
- ?RAM_DURATION_UPDATE_INTERVAL,
- rabbit_amqqueue, update_ram_duration,
- [self()]),
+ TRef = erlang:send_after(?RAM_DURATION_UPDATE_INTERVAL,
+ self(), update_ram_duration),
State #state { rate_timer_ref = TRef };
ensure_rate_timer(State = #state { rate_timer_ref = just_measured }) ->
State #state { rate_timer_ref = undefined };
@@ -544,7 +593,7 @@ stop_rate_timer(State = #state { rate_timer_ref = undefined }) ->
stop_rate_timer(State = #state { rate_timer_ref = just_measured }) ->
State #state { rate_timer_ref = undefined };
stop_rate_timer(State = #state { rate_timer_ref = TRef }) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State #state { rate_timer_ref = undefined }.
ensure_monitoring(ChPid, State = #state { known_senders = KS }) ->
@@ -748,7 +797,7 @@ process_instruction({set_length, Length},
backing_queue_state = BQS }) ->
QLen = BQ:len(BQS),
ToDrop = QLen - Length,
- {ok, case ToDrop > 0 of
+ {ok, case ToDrop >= 0 of
true -> BQS1 =
lists:foldl(
fun (const, BQSN) ->
@@ -756,7 +805,8 @@ process_instruction({set_length, Length},
BQSN1} = BQ:fetch(false, BQSN),
BQSN1
end, BQS, lists:duplicate(ToDrop, const)),
- State #state { backing_queue_state = BQS1 };
+ set_synchronised(
+ true, State #state { backing_queue_state = BQS1 });
false -> State
end};
process_instruction({fetch, AckRequired, MsgId, Remaining},
@@ -769,6 +819,8 @@ process_instruction({fetch, AckRequired, MsgId, Remaining},
AckTag, Remaining}, BQS1} = BQ:fetch(AckRequired, BQS),
maybe_store_ack(AckRequired, MsgId, AckTag,
State #state { backing_queue_state = BQS1 });
+ Other when Other + 1 =:= Remaining ->
+ set_synchronised(true, State);
Other when Other < Remaining ->
%% we must be shorter than the master
State
@@ -821,6 +873,10 @@ process_instruction({sender_death, ChPid},
msg_id_status = MS1,
known_senders = dict:erase(ChPid, KS) }
end};
+process_instruction({length, Length},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {ok, set_synchronised(Length =:= BQ:len(BQS), State)};
process_instruction({delete_and_terminate, Reason},
State = #state { backing_queue = BQ,
backing_queue_state = BQS }) ->
@@ -848,3 +904,15 @@ maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA,
ack_num = Num }) ->
State #state { msg_id_ack = dict:store(MsgId, {Num, AckTag}, MA),
ack_num = Num + 1 }.
+
+%% We intentionally leave out the head where a slave becomes
+%% unsynchronised: we assert that can never happen.
+set_synchronised(true, State = #state { q = #amqqueue { name = QName },
+ synchronised = false }) ->
+ rabbit_event:notify(queue_slave_synchronised, [{pid, self()},
+ {name, QName}]),
+ State #state { synchronised = true };
+set_synchronised(true, State) ->
+ State;
+set_synchronised(false, State = #state { synchronised = false }) ->
+ State.
diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl
index b6b97f6d..f2dc97fd 100644
--- a/src/rabbit_misc.erl
+++ b/src/rabbit_misc.erl
@@ -18,8 +18,6 @@
-include("rabbit.hrl").
-include("rabbit_framing.hrl").
--include_lib("kernel/include/file.hrl").
-
-export([method_record_type/1, polite_pause/0, polite_pause/1]).
-export([die/1, frame_error/2, amqp_error/4,
protocol_error/3, protocol_error/4, protocol_error/1]).
@@ -40,23 +38,22 @@
-export([upmap/2, map_in_order/2]).
-export([table_filter/3]).
-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
--export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
--export([append_file/2, ensure_parent_dirs_exist/1]).
--export([format_stderr/2]).
+-export([format_stderr/2, with_local_io/1, local_info_msg/2]).
-export([start_applications/1, stop_applications/1]).
-export([unfold/2, ceil/1, queue_fold/3]).
-export([sort_field_table/1]).
-export([pid_to_string/1, string_to_pid/1]).
-export([version_compare/2, version_compare/3]).
--export([recursive_delete/1, recursive_copy/2, dict_cons/3, orddict_cons/3]).
+-export([dict_cons/3, orddict_cons/3]).
-export([get_options/2]).
-export([all_module_attributes/1, build_acyclic_graph/3]).
-export([now_ms/0]).
--export([lock_file/1]).
-export([const_ok/0, const/1]).
-export([ntoa/1, ntoab/1]).
-export([is_process_alive/1]).
-export([pget/2, pget/3, pget_or_die/2]).
+-export([format_message_queue/2]).
+-export([append_rpc_all_nodes/4]).
%%----------------------------------------------------------------------------
@@ -156,14 +153,9 @@
-spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom())
-> 'ok' | 'aborted').
-spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()).
--spec(read_term_file/1 ::
- (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())).
--spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()).
--spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()).
--spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()).
--spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()).
--spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok').
-spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
+-spec(with_local_io/1 :: (fun (() -> A)) -> A).
+-spec(local_info_msg/2 :: (string(), [any()]) -> 'ok').
-spec(start_applications/1 :: ([atom()]) -> 'ok').
-spec(stop_applications/1 :: ([atom()]) -> 'ok').
-spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
@@ -177,12 +169,6 @@
-spec(version_compare/3 ::
(string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
-> boolean()).
--spec(recursive_delete/1 ::
- ([file:filename()])
- -> rabbit_types:ok_or_error({file:filename(), any()})).
--spec(recursive_copy/2 ::
- (file:filename(), file:filename())
- -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
-spec(dict_cons/3 :: (any(), any(), dict()) -> dict()).
-spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
-spec(get_options/2 :: ([optdef()], [string()])
@@ -196,7 +182,6 @@
{bad_edge, [digraph:vertex()]}),
digraph:vertex(), digraph:vertex()})).
-spec(now_ms/0 :: () -> non_neg_integer()).
--spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
-spec(const_ok/0 :: () -> 'ok').
-spec(const/1 :: (A) -> thunk(A)).
-spec(ntoa/1 :: (inet:ip_address()) -> string()).
@@ -205,6 +190,8 @@
-spec(pget/2 :: (term(), [term()]) -> term()).
-spec(pget/3 :: (term(), [term()], term()) -> term()).
-spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()).
+-spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
+-spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
-endif.
@@ -520,74 +507,6 @@ dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
dirty_dump_log1(LH, disk_log:chunk(LH, K)).
-
-read_term_file(File) -> file:consult(File).
-
-write_term_file(File, Terms) ->
- write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) ||
- Term <- Terms])).
-
-write_file(Path, Data) ->
- write_file(Path, Data, []).
-
-%% write_file/3 and make_binary/1 are both based on corresponding
-%% functions in the kernel/file.erl module of the Erlang R14B02
-%% release, which is licensed under the EPL. That implementation of
-%% write_file/3 does not do an fsync prior to closing the file, hence
-%% the existence of this version. APIs are otherwise identical.
-write_file(Path, Data, Modes) ->
- Modes1 = [binary, write | (Modes -- [binary, write])],
- case make_binary(Data) of
- Bin when is_binary(Bin) ->
- case file:open(Path, Modes1) of
- {ok, Hdl} -> try file:write(Hdl, Bin) of
- ok -> file:sync(Hdl);
- {error, _} = E -> E
- after
- file:close(Hdl)
- end;
- {error, _} = E -> E
- end;
- {error, _} = E -> E
- end.
-
-make_binary(Bin) when is_binary(Bin) ->
- Bin;
-make_binary(List) ->
- try
- iolist_to_binary(List)
- catch error:Reason ->
- {error, Reason}
- end.
-
-
-append_file(File, Suffix) ->
- case file:read_file_info(File) of
- {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
- {error, enoent} -> append_file(File, 0, Suffix);
- Error -> Error
- end.
-
-append_file(_, _, "") ->
- ok;
-append_file(File, 0, Suffix) ->
- case file:open([File, Suffix], [append]) of
- {ok, Fd} -> file:close(Fd);
- Error -> Error
- end;
-append_file(File, _, Suffix) ->
- case file:read_file(File) of
- {ok, Data} -> write_file([File, Suffix], Data, [append]);
- Error -> Error
- end.
-
-ensure_parent_dirs_exist(Filename) ->
- case filelib:ensure_dir(Filename) of
- ok -> ok;
- {error, Reason} ->
- throw({error, {cannot_create_parent_dirs, Filename, Reason}})
- end.
-
format_stderr(Fmt, Args) ->
case os:type() of
{unix, _} ->
@@ -603,6 +522,23 @@ format_stderr(Fmt, Args) ->
end,
ok.
+%% Execute Fun using the IO system of the local node (i.e. the node on
+%% which the code is executing).
+with_local_io(Fun) ->
+ GL = group_leader(),
+ group_leader(whereis(user), self()),
+ try
+ Fun()
+ after
+ group_leader(GL, self())
+ end.
+
+%% Log an info message on the local node using the standard logger.
+%% Use this if rabbit isn't running and the call didn't originate on
+%% the local node (e.g. rabbitmqctl calls).
+local_info_msg(Format, Args) ->
+ with_local_io(fun () -> error_logger:info_msg(Format, Args) end).
+
manage_applications(Iterate, Do, Undo, SkipError, ErrorTag, Apps) ->
Iterate(fun (App, Acc) ->
case Do(App) of
@@ -727,67 +663,6 @@ version_compare(A, B) ->
dropdot(A) -> lists:dropwhile(fun (X) -> X =:= $. end, A).
-recursive_delete(Files) ->
- lists:foldl(fun (Path, ok ) -> recursive_delete1(Path);
- (_Path, {error, _Err} = Error) -> Error
- end, ok, Files).
-
-recursive_delete1(Path) ->
- case filelib:is_dir(Path) of
- false -> case file:delete(Path) of
- ok -> ok;
- {error, enoent} -> ok; %% Path doesn't exist anyway
- {error, Err} -> {error, {Path, Err}}
- end;
- true -> case file:list_dir(Path) of
- {ok, FileNames} ->
- case lists:foldl(
- fun (FileName, ok) ->
- recursive_delete1(
- filename:join(Path, FileName));
- (_FileName, Error) ->
- Error
- end, ok, FileNames) of
- ok ->
- case file:del_dir(Path) of
- ok -> ok;
- {error, Err} -> {error, {Path, Err}}
- end;
- {error, _Err} = Error ->
- Error
- end;
- {error, Err} ->
- {error, {Path, Err}}
- end
- end.
-
-recursive_copy(Src, Dest) ->
- case filelib:is_dir(Src) of
- false -> case file:copy(Src, Dest) of
- {ok, _Bytes} -> ok;
- {error, enoent} -> ok; %% Path doesn't exist anyway
- {error, Err} -> {error, {Src, Dest, Err}}
- end;
- true -> case file:list_dir(Src) of
- {ok, FileNames} ->
- case file:make_dir(Dest) of
- ok ->
- lists:foldl(
- fun (FileName, ok) ->
- recursive_copy(
- filename:join(Src, FileName),
- filename:join(Dest, FileName));
- (_FileName, Error) ->
- Error
- end, ok, FileNames);
- {error, Err} ->
- {error, {Src, Dest, Err}}
- end;
- {error, Err} ->
- {error, {Src, Dest, Err}}
- end
- end.
-
dict_cons(Key, Value, Dict) ->
dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
@@ -877,15 +752,6 @@ build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
{error, Reason}
end.
-%% TODO: When we stop supporting Erlang prior to R14, this should be
-%% replaced with file:open [write, exclusive]
-lock_file(Path) ->
- case filelib:is_file(Path) of
- true -> {error, eexist};
- false -> {ok, Lock} = file:open(Path, [write]),
- ok = file:close(Lock)
- end.
-
const_ok() -> ok.
const(X) -> fun () -> X end.
@@ -919,3 +785,31 @@ pget_or_die(K, P) ->
undefined -> exit({error, key_missing, K});
V -> V
end.
+
+format_message_queue(_Opt, MQ) ->
+ Len = priority_queue:len(MQ),
+ {Len,
+ case Len > 100 of
+ false -> priority_queue:to_list(MQ);
+ true -> {summary,
+ orddict:to_list(
+ lists:foldl(
+ fun ({P, V}, Counts) ->
+ orddict:update_counter(
+ {P, format_message_queue_entry(V)}, 1, Counts)
+ end, orddict:new(), priority_queue:to_list(MQ)))}
+ end}.
+
+format_message_queue_entry(V) when is_atom(V) ->
+ V;
+format_message_queue_entry(V) when is_tuple(V) ->
+ list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
+format_message_queue_entry(_V) ->
+ '_'.
+
+append_rpc_all_nodes(Nodes, M, F, A) ->
+ {ResL, _} = rpc:multicall(Nodes, M, F, A),
+ lists:append([case Res of
+ {badrpc, _} -> [];
+ _ -> Res
+ end || Res <- ResL]).
diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl
index 8d5c8646..c8c18843 100644
--- a/src/rabbit_mnesia.erl
+++ b/src/rabbit_mnesia.erl
@@ -23,7 +23,8 @@
empty_ram_only_tables/0, copy_db/1, wait_for_tables/1,
create_cluster_nodes_config/1, read_cluster_nodes_config/0,
record_running_nodes/0, read_previously_running_nodes/0,
- delete_previously_running_nodes/0, running_nodes_filename/0]).
+ delete_previously_running_nodes/0, running_nodes_filename/0,
+ is_disc_node/0, on_node_down/1, on_node_up/1]).
-export([table_names/0]).
@@ -65,6 +66,11 @@
-spec(read_previously_running_nodes/0 :: () -> [node()]).
-spec(delete_previously_running_nodes/0 :: () -> 'ok').
-spec(running_nodes_filename/0 :: () -> file:filename()).
+-spec(is_disc_node/0 :: () -> boolean()).
+-spec(on_node_up/1 :: (node()) -> 'ok').
+-spec(on_node_down/1 :: (node()) -> 'ok').
+
+-spec(table_names/0 :: () -> [atom()]).
-endif.
@@ -83,7 +89,9 @@ status() ->
no -> case all_clustered_nodes() of
[] -> [];
Nodes -> [{unknown, Nodes}]
- end
+ end;
+ Reason when Reason =:= starting; Reason =:= stopping ->
+ exit({rabbit_busy, try_again_later})
end},
{running_nodes, running_clustered_nodes()}].
@@ -113,16 +121,66 @@ force_cluster(ClusterNodes) ->
%% node. If Force is false, only connections to online nodes are
%% allowed.
cluster(ClusterNodes, Force) ->
+ rabbit_misc:local_info_msg("Clustering with ~p~s~n",
+ [ClusterNodes, if Force -> " forcefully";
+ true -> ""
+ end]),
ensure_mnesia_not_running(),
ensure_mnesia_dir(),
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+
+ case not Force andalso is_clustered() andalso
+ is_only_disc_node(node(), false) andalso
+ not should_be_disc_node(ClusterNodes)
+ of
+ true -> log_both("last running disc node leaving cluster");
+ _ -> ok
+ end,
+
+ %% Wipe mnesia if we're changing type from disc to ram
+ case {is_disc_node(), should_be_disc_node(ClusterNodes)} of
+ {true, false} -> rabbit_misc:with_local_io(
+ fun () -> error_logger:warning_msg(
+ "changing node type; wiping "
+ "mnesia...~n~n")
+ end),
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
+ cannot_delete_schema);
+ _ -> ok
+ end,
+
+ %% Pre-emptively leave the cluster
+ %%
+ %% We're trying to handle the following two cases:
+ %% 1. We have a two-node cluster, where both nodes are disc nodes.
+ %% One node is re-clustered as a ram node. When it tries to
+ %% re-join the cluster, but before it has time to update its
+ %% tables definitions, the other node will order it to re-create
+ %% its disc tables. So, we need to leave the cluster before we
+ %% can join it again.
+ %% 2. We have a two-node cluster, where both nodes are disc nodes.
+ %% One node is forcefully reset (so, the other node thinks its
+ %% still a part of the cluster). The reset node is re-clustered
+ %% as a ram node. Same as above, we need to leave the cluster
+ %% before we can join it. But, since we don't know if we're in a
+ %% cluster or not, we just pre-emptively leave it before joining.
+ ProperClusterNodes = ClusterNodes -- [node()],
+ try
+ ok = leave_cluster(ProperClusterNodes, ProperClusterNodes)
+ catch
+ {error, {no_running_cluster_nodes, _, _}} when Force ->
+ ok
+ end,
+
+ %% Join the cluster
+ start_mnesia(),
try
ok = init_db(ClusterNodes, Force,
fun maybe_upgrade_local_or_record_desired/0),
ok = create_cluster_nodes_config(ClusterNodes)
after
- mnesia:stop()
+ stop_mnesia()
end,
+
ok.
%% return node to its virgin state, where it is not member of any
@@ -158,10 +216,13 @@ nodes_of_type(Type) ->
%% This function should return the nodes of a certain type (ram,
%% disc or disc_only) in the current cluster. The type of nodes
%% is determined when the cluster is initially configured.
- %% Specifically, we check whether a certain table, which we know
- %% will be written to disk on a disc node, is stored on disk or in
- %% RAM.
- mnesia:table_info(rabbit_durable_exchange, Type).
+ mnesia:table_info(schema, Type).
+
+%% The tables aren't supposed to be on disk on a ram node
+table_definitions(disc) ->
+ table_definitions();
+table_definitions(ram) ->
+ [{Tab, copy_type_to_ram(TabDef)} || {Tab, TabDef} <- table_definitions()].
table_definitions() ->
[{rabbit_user,
@@ -218,8 +279,6 @@ table_definitions() ->
{type, ordered_set},
{match, #topic_trie_binding{trie_binding = trie_binding_match(),
_='_'}}]},
- %% Consider the implications to nodes_of_type/1 before altering
- %% the next entry.
{rabbit_durable_exchange,
[{record_name, exchange},
{attributes, record_info(fields, exchange)},
@@ -242,7 +301,8 @@ table_definitions() ->
[{record_name, amqqueue},
{attributes, record_info(fields, amqqueue)},
{match, #amqqueue{name = queue_name_match(), _='_'}}]}]
- ++ gm:table_definitions().
+ ++ gm:table_definitions()
+ ++ mirrored_supervisor:table_definitions().
binding_match() ->
#binding{source = exchange_name_match(),
@@ -288,14 +348,24 @@ ensure_mnesia_dir() ->
ensure_mnesia_running() ->
case mnesia:system_info(is_running) of
- yes -> ok;
- no -> throw({error, mnesia_not_running})
+ yes ->
+ ok;
+ starting ->
+ wait_for(mnesia_running),
+ ensure_mnesia_running();
+ Reason when Reason =:= no; Reason =:= stopping ->
+ throw({error, mnesia_not_running})
end.
ensure_mnesia_not_running() ->
case mnesia:system_info(is_running) of
- no -> ok;
- yes -> throw({error, mnesia_unexpectedly_running})
+ no ->
+ ok;
+ stopping ->
+ wait_for(mnesia_not_running),
+ ensure_mnesia_not_running();
+ Reason when Reason =:= yes; Reason =:= starting ->
+ throw({error, mnesia_unexpectedly_running})
end.
ensure_schema_integrity() ->
@@ -341,7 +411,11 @@ check_table_content(Tab, TabDef) ->
end.
check_tables(Fun) ->
- case [Error || {Tab, TabDef} <- table_definitions(),
+ case [Error || {Tab, TabDef} <- table_definitions(
+ case is_disc_node() of
+ true -> disc;
+ false -> ram
+ end),
case Fun(Tab, TabDef) of
ok -> Error = none, false;
{error, Error} -> true
@@ -364,7 +438,7 @@ cluster_nodes_config_filename() ->
create_cluster_nodes_config(ClusterNodes) ->
FileName = cluster_nodes_config_filename(),
- case rabbit_misc:write_term_file(FileName, [ClusterNodes]) of
+ case rabbit_file:write_term_file(FileName, [ClusterNodes]) of
ok -> ok;
{error, Reason} ->
throw({error, {cannot_create_cluster_nodes_config,
@@ -373,7 +447,7 @@ create_cluster_nodes_config(ClusterNodes) ->
read_cluster_nodes_config() ->
FileName = cluster_nodes_config_filename(),
- case rabbit_misc:read_term_file(FileName) of
+ case rabbit_file:read_term_file(FileName) of
{ok, [ClusterNodes]} -> ClusterNodes;
{error, enoent} ->
{ok, ClusterNodes} = application:get_env(rabbit, cluster_nodes),
@@ -401,12 +475,12 @@ record_running_nodes() ->
Nodes = running_clustered_nodes() -- [node()],
%% Don't check the result: we're shutting down anyway and this is
%% a best-effort-basis.
- rabbit_misc:write_term_file(FileName, [Nodes]),
+ rabbit_file:write_term_file(FileName, [Nodes]),
ok.
read_previously_running_nodes() ->
FileName = running_nodes_filename(),
- case rabbit_misc:read_term_file(FileName) of
+ case rabbit_file:read_term_file(FileName) of
{ok, [Nodes]} -> Nodes;
{error, enoent} -> [];
{error, Reason} -> throw({error, {cannot_read_previous_nodes_file,
@@ -442,30 +516,47 @@ init_db(ClusterNodes, Force, SecondaryPostMnesiaFun) ->
end;
true -> ok
end,
- case {Nodes, mnesia:system_info(use_dir)} of
- {[], false} ->
+ WantDiscNode = should_be_disc_node(ClusterNodes),
+ WasDiscNode = is_disc_node(),
+ %% We create a new db (on disk, or in ram) in the first
+ %% two cases and attempt to upgrade the in the other two
+ case {Nodes, WasDiscNode, WantDiscNode} of
+ {[], _, false} ->
+ %% New ram node; start from scratch
+ ok = create_schema(ram);
+ {[], false, true} ->
%% Nothing there at all, start from scratch
- ok = create_schema();
- {[], true} ->
+ ok = create_schema(disc);
+ {[], true, true} ->
%% We're the first node up
case rabbit_upgrade:maybe_upgrade_local() of
ok -> ensure_schema_integrity();
version_not_available -> ok = schema_ok_or_move()
- end,
- ok;
- {[AnotherNode|_], _} ->
+ end;
+ {[AnotherNode|_], _, _} ->
%% Subsequent node in cluster, catch up
ensure_version_ok(
rpc:call(AnotherNode, rabbit_version, recorded, [])),
- IsDiskNode = ClusterNodes == [] orelse
- lists:member(node(), ClusterNodes),
+ {CopyType, CopyTypeAlt} =
+ case WantDiscNode of
+ true -> {disc, disc_copies};
+ false -> {ram, ram_copies}
+ end,
ok = wait_for_replicated_tables(),
- ok = create_local_table_copy(schema, disc_copies),
- ok = create_local_table_copies(case IsDiskNode of
- true -> disc;
- false -> ram
- end),
+ ok = create_local_table_copy(schema, CopyTypeAlt),
+ ok = create_local_table_copies(CopyType),
+
ok = SecondaryPostMnesiaFun(),
+ %% We've taken down mnesia, so ram nodes will need
+ %% to re-sync
+ case is_disc_node() of
+ false -> start_mnesia(),
+ mnesia:change_config(extra_db_nodes,
+ ProperClusterNodes),
+ wait_for_replicated_tables();
+ true -> ok
+ end,
+
ensure_schema_integrity(),
ok
end;
@@ -496,7 +587,7 @@ schema_ok_or_move() ->
"and recreating schema from scratch~n",
[Reason]),
ok = move_db(),
- ok = create_schema()
+ ok = create_schema(disc)
end.
ensure_version_ok({ok, DiscVersion}) ->
@@ -508,18 +599,27 @@ ensure_version_ok({ok, DiscVersion}) ->
ensure_version_ok({error, _}) ->
ok = rabbit_version:record_desired().
-create_schema() ->
- mnesia:stop(),
- rabbit_misc:ensure_ok(mnesia:create_schema([node()]),
- cannot_create_schema),
- rabbit_misc:ensure_ok(mnesia:start(),
- cannot_start_mnesia),
- ok = create_tables(),
+create_schema(Type) ->
+ stop_mnesia(),
+ case Type of
+ disc -> rabbit_misc:ensure_ok(mnesia:create_schema([node()]),
+ cannot_create_schema);
+ ram -> %% remove the disc schema since this is a ram node
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
+ cannot_delete_schema)
+ end,
+ start_mnesia(),
+ ok = create_tables(Type),
ensure_schema_integrity(),
ok = rabbit_version:record_desired().
+is_disc_node() -> mnesia:system_info(use_dir).
+
+should_be_disc_node(ClusterNodes) ->
+ ClusterNodes == [] orelse lists:member(node(), ClusterNodes).
+
move_db() ->
- mnesia:stop(),
+ stop_mnesia(),
MnesiaDir = filename:dirname(dir() ++ "/"),
{{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(),
BackupDir = lists:flatten(
@@ -537,14 +637,16 @@ move_db() ->
MnesiaDir, BackupDir, Reason}})
end,
ensure_mnesia_dir(),
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ start_mnesia(),
ok.
copy_db(Destination) ->
ok = ensure_mnesia_not_running(),
- rabbit_misc:recursive_copy(dir(), Destination).
+ rabbit_file:recursive_copy(dir(), Destination).
+
+create_tables() -> create_tables(disc).
-create_tables() ->
+create_tables(Type) ->
lists:foreach(fun ({Tab, TabDef}) ->
TabDef1 = proplists:delete(match, TabDef),
case mnesia:create_table(Tab, TabDef1) of
@@ -554,9 +656,13 @@ create_tables() ->
Tab, TabDef1, Reason}})
end
end,
- table_definitions()),
+ table_definitions(Type)),
ok.
+copy_type_to_ram(TabDef) ->
+ [{disc_copies, []}, {ram_copies, [node()]}
+ | proplists:delete(ram_copies, proplists:delete(disc_copies, TabDef))].
+
table_has_copy_type(TabDef, DiscType) ->
lists:member(node(), proplists:get_value(DiscType, TabDef, [])).
@@ -586,7 +692,7 @@ create_local_table_copies(Type) ->
end,
ok = create_local_table_copy(Tab, StorageType)
end,
- table_definitions()),
+ table_definitions(Type)),
ok.
create_local_table_copy(Tab, Type) ->
@@ -616,20 +722,29 @@ wait_for_tables(TableNames) ->
end.
reset(Force) ->
+ rabbit_misc:local_info_msg("Resetting Rabbit~s~n", [if Force -> " forcefully";
+ true -> ""
+ end]),
ensure_mnesia_not_running(),
+ case not Force andalso is_clustered() andalso
+ is_only_disc_node(node(), false)
+ of
+ true -> log_both("no other disc nodes running");
+ false -> ok
+ end,
Node = node(),
case Force of
true -> ok;
false ->
ensure_mnesia_dir(),
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ start_mnesia(),
{Nodes, RunningNodes} =
try
ok = init(),
{all_clustered_nodes() -- [Node],
running_clustered_nodes() -- [Node]}
after
- mnesia:stop()
+ stop_mnesia()
end,
leave_cluster(Nodes, RunningNodes),
rabbit_misc:ensure_ok(mnesia:delete_schema([Node]),
@@ -637,7 +752,7 @@ reset(Force) ->
end,
ok = delete_cluster_nodes_config(),
%% remove persisted messages and any other garbage we find
- ok = rabbit_misc:recursive_delete(filelib:wildcard(dir() ++ "/*")),
+ ok = rabbit_file:recursive_delete(filelib:wildcard(dir() ++ "/*")),
ok.
leave_cluster([], _) -> ok;
@@ -652,6 +767,7 @@ leave_cluster(Nodes, RunningNodes) ->
[schema, node()]) of
{atomic, ok} -> true;
{badrpc, nodedown} -> false;
+ {aborted, {node_not_running, _}} -> false;
{aborted, Reason} ->
throw({error, {failed_to_leave_cluster,
Nodes, RunningNodes, Reason}})
@@ -662,3 +778,42 @@ leave_cluster(Nodes, RunningNodes) ->
false -> throw({error, {no_running_cluster_nodes,
Nodes, RunningNodes}})
end.
+
+wait_for(Condition) ->
+ error_logger:info_msg("Waiting for ~p...~n", [Condition]),
+ timer:sleep(1000).
+
+on_node_up(Node) ->
+ case is_only_disc_node(Node, true) of
+ true -> rabbit_log:info("cluster contains disc nodes again~n");
+ false -> ok
+ end.
+
+on_node_down(Node) ->
+ case is_only_disc_node(Node, true) of
+ true -> rabbit_log:info("only running disc node went down~n");
+ false -> ok
+ end.
+
+is_only_disc_node(Node, _MnesiaRunning = true) ->
+ RunningSet = sets:from_list(running_clustered_nodes()),
+ DiscSet = sets:from_list(nodes_of_type(disc_copies)),
+ [Node] =:= sets:to_list(sets:intersection(RunningSet, DiscSet));
+is_only_disc_node(Node, false) ->
+ start_mnesia(),
+ Res = is_only_disc_node(Node, true),
+ stop_mnesia(),
+ Res.
+
+log_both(Warning) ->
+ io:format("Warning: ~s~n", [Warning]),
+ rabbit_misc:with_local_io(
+ fun () -> error_logger:warning_msg("~s~n", [Warning]) end).
+
+start_mnesia() ->
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ ensure_mnesia_running().
+
+stop_mnesia() ->
+ stopped = mnesia:stop(),
+ ensure_mnesia_not_running().
diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl
index 3f4162cd..fc3cbebd 100644
--- a/src/rabbit_msg_store.erl
+++ b/src/rabbit_msg_store.erl
@@ -21,21 +21,22 @@
-export([start_link/4, successfully_recovered_state/1,
client_init/4, client_terminate/1, client_delete_and_terminate/1,
client_ref/1, close_all_indicated/1,
- write/3, read/2, contains/2, remove/2, sync/3]).
+ write/3, read/2, contains/2, remove/2]).
--export([sync/1, set_maximum_since_use/2,
- has_readers/2, combine_files/3, delete_file/2]). %% internal
+-export([set_maximum_since_use/2, has_readers/2, combine_files/3,
+ delete_file/2]). %% internal
-export([transform_dir/3, force_recovery/2]). %% upgrade
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3, prioritise_call/3, prioritise_cast/2]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_call/3, prioritise_cast/2,
+ prioritise_info/2, format_message_queue/2]).
%%----------------------------------------------------------------------------
-include("rabbit_msg_store.hrl").
--define(SYNC_INTERVAL, 5). %% milliseconds
+-define(SYNC_INTERVAL, 25). %% milliseconds
-define(CLEAN_FILENAME, "clean.dot").
-define(FILE_SUMMARY_FILENAME, "file_summary.ets").
-define(TRANSFORM_TMP, "transform_tmp").
@@ -59,7 +60,6 @@
current_file, %% current file name as number
current_file_handle, %% current file handle since the last fsync?
file_handle_cache, %% file handle cache
- on_sync, %% pending sync requests
sync_timer_ref, %% TRef for our interval timer
sum_valid_data, %% sum of valid data in all files
sum_file_size, %% sum of file sizes
@@ -132,7 +132,8 @@
-type(msg_ref_delta_gen(A) ::
fun ((A) -> 'finished' |
{rabbit_types:msg_id(), non_neg_integer(), A})).
--type(maybe_msg_id_fun() :: 'undefined' | fun ((gb_set()) -> any())).
+-type(maybe_msg_id_fun() ::
+ 'undefined' | fun ((gb_set(), 'written' | 'removed') -> any())).
-type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
-type(deletion_thunk() :: fun (() -> boolean())).
@@ -145,15 +146,14 @@
-spec(client_terminate/1 :: (client_msstate()) -> 'ok').
-spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok').
-spec(client_ref/1 :: (client_msstate()) -> client_ref()).
+-spec(close_all_indicated/1 ::
+ (client_msstate()) -> rabbit_types:ok(client_msstate())).
-spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
-spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) ->
{rabbit_types:ok(msg()) | 'not_found', client_msstate()}).
-spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()).
-spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok').
--spec(sync/3 ::
- ([rabbit_types:msg_id()], fun (() -> any()), client_msstate()) -> 'ok').
--spec(sync/1 :: (server()) -> 'ok').
-spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok').
-spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()).
-spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) ->
@@ -441,10 +441,6 @@ contains(MsgId, CState) -> server_call(CState, {contains, MsgId}).
remove([], _CState) -> ok;
remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
server_cast(CState, {remove, CRef, MsgIds}).
-sync(MsgIds, K, CState) -> server_cast(CState, {sync, MsgIds, K}).
-
-sync(Server) ->
- gen_server2:cast(Server, sync).
set_maximum_since_use(Server, Age) ->
gen_server2:cast(Server, {set_maximum_since_use, Age}).
@@ -593,7 +589,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
AttemptFileSummaryRecovery =
case ClientRefs of
- undefined -> ok = rabbit_misc:recursive_delete([Dir]),
+ undefined -> ok = rabbit_file:recursive_delete([Dir]),
ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
false;
_ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
@@ -641,7 +637,6 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
current_file = 0,
current_file_handle = undefined,
file_handle_cache = dict:new(),
- on_sync = [],
sync_timer_ref = undefined,
sum_valid_data = 0,
sum_file_size = 0,
@@ -682,7 +677,6 @@ prioritise_call(Msg, _From, _State) ->
prioritise_cast(Msg, _State) ->
case Msg of
- sync -> 8;
{combine_files, _Source, _Destination, _Reclaimed} -> 8;
{delete_file, _File, _Reclaimed} -> 8;
{set_maximum_since_use, _Age} -> 8;
@@ -690,6 +684,12 @@ prioritise_cast(Msg, _State) ->
_ -> 0
end.
+prioritise_info(Msg, _State) ->
+ case Msg of
+ sync -> 8;
+ _ -> 0
+ end.
+
handle_call(successfully_recovered_state, _From, State) ->
reply(State #msstate.successfully_recovered, State);
@@ -758,24 +758,6 @@ handle_cast({remove, CRef, MsgIds}, State) ->
noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(MsgIds),
removed, State1)));
-handle_cast({sync, MsgIds, K},
- State = #msstate { current_file = CurFile,
- current_file_handle = CurHdl,
- on_sync = Syncs }) ->
- {ok, SyncOffset} = file_handle_cache:last_sync_offset(CurHdl),
- case lists:any(fun (MsgId) ->
- #msg_location { file = File, offset = Offset } =
- index_lookup(MsgId, State),
- File =:= CurFile andalso Offset >= SyncOffset
- end, MsgIds) of
- false -> K(),
- noreply(State);
- true -> noreply(State #msstate { on_sync = [K | Syncs] })
- end;
-
-handle_cast(sync, State) ->
- noreply(internal_sync(State));
-
handle_cast({combine_files, Source, Destination, Reclaimed},
State = #msstate { sum_file_size = SumFileSize,
file_handles_ets = FileHandlesEts,
@@ -799,6 +781,9 @@ handle_cast({set_maximum_since_use, Age}, State) ->
ok = file_handle_cache:set_maximum_since_use(Age),
noreply(State).
+handle_info(sync, State) ->
+ noreply(internal_sync(State));
+
handle_info(timeout, State) ->
noreply(internal_sync(State));
@@ -836,6 +821,8 @@ terminate(_Reason, State = #msstate { index_state = IndexState,
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
%%----------------------------------------------------------------------------
%% general helper functions
%%----------------------------------------------------------------------------
@@ -849,31 +836,28 @@ reply(Reply, State) ->
{reply, Reply, State1, Timeout}.
next_state(State = #msstate { sync_timer_ref = undefined,
- on_sync = Syncs,
cref_to_msg_ids = CTM }) ->
- case {Syncs, dict:size(CTM)} of
- {[], 0} -> {State, hibernate};
- _ -> {start_sync_timer(State), 0}
+ case dict:size(CTM) of
+ 0 -> {State, hibernate};
+ _ -> {start_sync_timer(State), 0}
end;
-next_state(State = #msstate { on_sync = Syncs,
- cref_to_msg_ids = CTM }) ->
- case {Syncs, dict:size(CTM)} of
- {[], 0} -> {stop_sync_timer(State), hibernate};
- _ -> {State, 0}
+next_state(State = #msstate { cref_to_msg_ids = CTM }) ->
+ case dict:size(CTM) of
+ 0 -> {stop_sync_timer(State), hibernate};
+ _ -> {State, 0}
end.
start_sync_timer(State = #msstate { sync_timer_ref = undefined }) ->
- {ok, TRef} = timer:apply_after(?SYNC_INTERVAL, ?MODULE, sync, [self()]),
+ TRef = erlang:send_after(?SYNC_INTERVAL, self(), sync),
State #msstate { sync_timer_ref = TRef }.
stop_sync_timer(State = #msstate { sync_timer_ref = undefined }) ->
State;
stop_sync_timer(State = #msstate { sync_timer_ref = TRef }) ->
- {ok, cancel} = timer:cancel(TRef),
+ erlang:cancel_timer(TRef),
State #msstate { sync_timer_ref = undefined }.
internal_sync(State = #msstate { current_file_handle = CurHdl,
- on_sync = Syncs,
cref_to_msg_ids = CTM }) ->
State1 = stop_sync_timer(State),
CGs = dict:fold(fun (CRef, MsgIds, NS) ->
@@ -882,16 +866,13 @@ internal_sync(State = #msstate { current_file_handle = CurHdl,
false -> [{CRef, MsgIds} | NS]
end
end, [], CTM),
- ok = case {Syncs, CGs} of
- {[], []} -> ok;
- _ -> file_handle_cache:sync(CurHdl)
+ ok = case CGs of
+ [] -> ok;
+ _ -> file_handle_cache:sync(CurHdl)
end,
- [K() || K <- lists:reverse(Syncs)],
- State2 = lists:foldl(
- fun ({CRef, MsgIds}, StateN) ->
- client_confirm(CRef, MsgIds, written, StateN)
- end, State1, CGs),
- State2 #msstate { on_sync = [] }.
+ lists:foldl(fun ({CRef, MsgIds}, StateN) ->
+ client_confirm(CRef, MsgIds, written, StateN)
+ end, State1, CGs).
write_action({true, not_found}, _MsgId, State) ->
{ignore, undefined, State};
@@ -1359,11 +1340,11 @@ recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Server) ->
end.
store_recovery_terms(Terms, Dir) ->
- rabbit_misc:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
+ rabbit_file:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
read_recovery_terms(Dir) ->
Path = filename:join(Dir, ?CLEAN_FILENAME),
- case rabbit_misc:read_term_file(Path) of
+ case rabbit_file:read_term_file(Path) of
{ok, Terms} -> case file:delete(Path) of
ok -> {true, Terms};
{error, Error} -> {false, Error}
@@ -1920,7 +1901,7 @@ transform_dir(BaseDir, Store, TransformFun) ->
end.
transform_msg_file(FileOld, FileNew, TransformFun) ->
- ok = rabbit_misc:ensure_parent_dirs_exist(FileNew),
+ ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
{ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []),
{ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write],
[{write_buffer,
diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl
index 451e56e8..2c0912df 100644
--- a/src/rabbit_networking.erl
+++ b/src/rabbit_networking.erl
@@ -21,7 +21,7 @@
node_listeners/1, connections/0, connection_info_keys/0,
connection_info/1, connection_info/2,
connection_info_all/0, connection_info_all/1,
- close_connection/2]).
+ close_connection/2, force_connection_event_refresh/0]).
%%used by TCP-based transports, e.g. STOMP adapter
-export([check_tcp_listener_address/2,
@@ -30,6 +30,9 @@
-export([tcp_listener_started/3, tcp_listener_stopped/3,
start_client/1, start_ssl_client/2]).
+%% Internal
+-export([connections_local/0]).
+
-include("rabbit.hrl").
-include_lib("kernel/include/inet.hrl").
@@ -59,6 +62,7 @@
-spec(active_listeners/0 :: () -> [rabbit_types:listener()]).
-spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]).
-spec(connections/0 :: () -> [rabbit_types:connection()]).
+-spec(connections_local/0 :: () -> [rabbit_types:connection()]).
-spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(connection_info/1 ::
(rabbit_types:connection()) -> rabbit_types:infos()).
@@ -69,9 +73,38 @@
-spec(connection_info_all/1 ::
(rabbit_types:info_keys()) -> [rabbit_types:infos()]).
-spec(close_connection/2 :: (pid(), string()) -> 'ok').
+-spec(force_connection_event_refresh/0 :: () -> 'ok').
+
-spec(on_node_down/1 :: (node()) -> 'ok').
-spec(check_tcp_listener_address/2 :: (atom(), listener_config())
-> [{inet:ip_address(), ip_port(), family(), atom()}]).
+-spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
+-spec(ssl_transform_fun/1 ::
+ (rabbit_types:infos())
+ -> fun ((rabbit_net:socket())
+ -> rabbit_types:ok_or_error(#ssl_socket{}))).
+
+-spec(boot/0 :: () -> 'ok').
+-spec(start_client/1 ::
+ (port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
+ atom() | pid() | port() | {atom(),atom()}).
+-spec(start_ssl_client/2 ::
+ (_,port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
+ atom() | pid() | port() | {atom(),atom()}).
+-spec(tcp_listener_started/3 ::
+ (_,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok').
+-spec(tcp_listener_stopped/3 ::
+ (_,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok').
-endif.
@@ -270,10 +303,13 @@ start_ssl_client(SslOpts, Sock) ->
start_client(Sock, ssl_transform_fun(SslOpts)).
connections() ->
+ rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:running_clustered_nodes(),
+ rabbit_networking, connections_local, []).
+
+connections_local() ->
[rabbit_connection_sup:reader(ConnSup) ||
- Node <- rabbit_mnesia:running_clustered_nodes(),
{_, ConnSup, supervisor, _}
- <- supervisor:which_children({rabbit_tcp_client_sup, Node})].
+ <- supervisor:which_children(rabbit_tcp_client_sup)].
connection_info_keys() -> rabbit_reader:info_keys().
@@ -284,11 +320,16 @@ connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
close_connection(Pid, Explanation) ->
+ rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
case lists:member(Pid, connections()) of
true -> rabbit_reader:shutdown(Pid, Explanation);
false -> throw({error, {not_a_connection_pid, Pid}})
end.
+force_connection_event_refresh() ->
+ [rabbit_reader:force_event_refresh(C) || C <- connections()],
+ ok.
+
%%--------------------------------------------------------------------
tcp_host({0,0,0,0}) ->
diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl
index 1f30a2fc..8aa24ab5 100644
--- a/src/rabbit_node_monitor.erl
+++ b/src/rabbit_node_monitor.erl
@@ -31,6 +31,7 @@
-ifdef(use_specs).
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(rabbit_running_on/1 :: (node()) -> 'ok').
-spec(notify_cluster/0 :: () -> 'ok').
@@ -60,24 +61,19 @@ notify_cluster() ->
%%--------------------------------------------------------------------
init([]) ->
- ok = net_kernel:monitor_nodes(true),
{ok, no_state}.
handle_call(_Request, _From, State) ->
{noreply, State}.
handle_cast({rabbit_running_on, Node}, State) ->
- rabbit_log:info("node ~p up~n", [Node]),
+ rabbit_log:info("rabbit on ~p up~n", [Node]),
erlang:monitor(process, {rabbit, Node}),
- ok = rabbit_alarm:on_node_up(Node),
+ ok = handle_live_rabbit(Node),
{noreply, State};
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({nodedown, Node}, State) ->
- rabbit_log:info("node ~p down~n", [Node]),
- ok = handle_dead_rabbit(Node),
- {noreply, State};
handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason}, State) ->
rabbit_log:info("node ~p lost 'rabbit'~n", [Node]),
ok = handle_dead_rabbit(Node),
@@ -99,4 +95,9 @@ code_change(_OldVsn, State, _Extra) ->
handle_dead_rabbit(Node) ->
ok = rabbit_networking:on_node_down(Node),
ok = rabbit_amqqueue:on_node_down(Node),
- ok = rabbit_alarm:on_node_down(Node).
+ ok = rabbit_alarm:on_node_down(Node),
+ ok = rabbit_mnesia:on_node_down(Node).
+
+handle_live_rabbit(Node) ->
+ ok = rabbit_alarm:on_node_up(Node),
+ ok = rabbit_mnesia:on_node_up(Node).
diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl
index 92829e49..cd0c322b 100644
--- a/src/rabbit_prelaunch.erl
+++ b/src/rabbit_prelaunch.erl
@@ -29,6 +29,9 @@
-spec(start/0 :: () -> no_return()).
-spec(stop/0 :: () -> 'ok').
+%% Shut dialyzer up
+-spec(terminate/1 :: (string()) -> no_return()).
+-spec(terminate/2 :: (string(), [any()]) -> no_return()).
-endif.
@@ -67,7 +70,7 @@ start() ->
AppVersions},
%% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel
- rabbit_misc:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])),
+ rabbit_file:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])),
%% We exclude mochiweb due to its optional use of fdsrv.
XRefExclude = [mochiweb],
@@ -136,38 +139,10 @@ determine_version(App) ->
{App, Vsn}.
delete_recursively(Fn) ->
- case filelib:is_dir(Fn) and not(is_symlink(Fn)) of
- true ->
- case file:list_dir(Fn) of
- {ok, Files} ->
- case lists:foldl(fun ( Fn1, ok) -> delete_recursively(
- Fn ++ "/" ++ Fn1);
- (_Fn1, Err) -> Err
- end, ok, Files) of
- ok -> case file:del_dir(Fn) of
- ok -> ok;
- {error, E} -> {error,
- {cannot_delete, Fn, E}}
- end;
- Err -> Err
- end;
- {error, E} ->
- {error, {cannot_list_files, Fn, E}}
- end;
- false ->
- case filelib:is_file(Fn) of
- true -> case file:delete(Fn) of
- ok -> ok;
- {error, E} -> {error, {cannot_delete, Fn, E}}
- end;
- false -> ok
- end
- end.
-
-is_symlink(Name) ->
- case file:read_link(Name) of
- {ok, _} -> true;
- _ -> false
+ case rabbit_file:recursive_delete([Fn]) of
+ ok -> ok;
+ {error, {Path, E}} -> {error, {cannot_delete, Path, E}};
+ Error -> Error
end.
unpack_ez_plugins(SrcDir, DestDir) ->
diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl
index bf89cdb2..f1751e95 100644
--- a/src/rabbit_queue_index.erl
+++ b/src/rabbit_queue_index.erl
@@ -229,7 +229,7 @@
init(Name, OnSyncFun) ->
State = #qistate { dir = Dir } = blank_state(Name),
- false = filelib:is_file(Dir), %% is_file == is file or dir
+ false = rabbit_file:is_file(Dir), %% is_file == is file or dir
State #qistate { on_sync = OnSyncFun }.
shutdown_terms(Name) ->
@@ -256,7 +256,7 @@ terminate(Terms, State) ->
delete_and_terminate(State) ->
{_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
- ok = rabbit_misc:recursive_delete([Dir]),
+ ok = rabbit_file:recursive_delete([Dir]),
State1.
publish(MsgId, SeqId, MsgProps, IsPersistent,
@@ -359,16 +359,16 @@ recover(DurableQueues) ->
{[dict:fetch(QueueDirName, DurableDict) | DurableAcc],
TermsAcc1};
false ->
- ok = rabbit_misc:recursive_delete([QueueDirPath]),
+ ok = rabbit_file:recursive_delete([QueueDirPath]),
{DurableAcc, TermsAcc}
end
end, {[], []}, QueueDirNames),
{DurableTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
all_queue_directory_names(Dir) ->
- case file:list_dir(Dir) of
+ case rabbit_file:list_dir(Dir) of
{ok, Entries} -> [ Entry || Entry <- Entries,
- filelib:is_dir(
+ rabbit_file:is_dir(
filename:join(Dir, Entry)) ];
{error, enoent} -> []
end.
@@ -392,18 +392,18 @@ blank_state(QueueName) ->
clean_file_name(Dir) -> filename:join(Dir, ?CLEAN_FILENAME).
detect_clean_shutdown(Dir) ->
- case file:delete(clean_file_name(Dir)) of
+ case rabbit_file:delete(clean_file_name(Dir)) of
ok -> true;
{error, enoent} -> false
end.
read_shutdown_terms(Dir) ->
- rabbit_misc:read_term_file(clean_file_name(Dir)).
+ rabbit_file:read_term_file(clean_file_name(Dir)).
store_clean_shutdown(Terms, Dir) ->
CleanFileName = clean_file_name(Dir),
- ok = filelib:ensure_dir(CleanFileName),
- rabbit_misc:write_term_file(CleanFileName, Terms).
+ ok = rabbit_file:ensure_dir(CleanFileName),
+ rabbit_file:write_term_file(CleanFileName, Terms).
init_clean(RecoveredCounts, State) ->
%% Load the journal. Since this is a clean recovery this (almost)
@@ -569,13 +569,13 @@ add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
add_to_journal(RelSeq, Action,
Segment = #segment { journal_entries = JEntries,
unacked = UnackedCount }) ->
- Segment1 = Segment #segment {
- journal_entries = add_to_journal(RelSeq, Action, JEntries) },
- case Action of
- del -> Segment1;
- ack -> Segment1 #segment { unacked = UnackedCount - 1 };
- ?PUB -> Segment1 #segment { unacked = UnackedCount + 1 }
- end;
+ Segment #segment {
+ journal_entries = add_to_journal(RelSeq, Action, JEntries),
+ unacked = UnackedCount + case Action of
+ ?PUB -> +1;
+ del -> 0;
+ ack -> -1
+ end};
add_to_journal(RelSeq, Action, JEntries) ->
Val = case array:get(RelSeq, JEntries) of
@@ -603,8 +603,8 @@ flush_journal(State = #qistate { segments = Segments }) ->
Segments1 =
segment_fold(
fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
- case filelib:is_file(Path) of
- true -> ok = file:delete(Path);
+ case rabbit_file:is_file(Path) of
+ true -> ok = rabbit_file:delete(Path);
false -> ok
end,
SegmentsN;
@@ -630,7 +630,7 @@ append_journal_to_segment(#segment { journal_entries = JEntries,
get_journal_handle(State = #qistate { journal_handle = undefined,
dir = Dir }) ->
Path = filename:join(Dir, ?JOURNAL_FILENAME),
- ok = filelib:ensure_dir(Path),
+ ok = rabbit_file:ensure_dir(Path),
{ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
[{write_buffer, infinity}]),
{Hdl, State #qistate { journal_handle = Hdl }};
@@ -735,7 +735,7 @@ all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
lists:takewhile(fun (C) -> $0 =< C andalso C =< $9 end,
SegName)), Set)
end, sets:from_list(segment_nums(Segments)),
- filelib:wildcard("*" ++ ?SEGMENT_EXTENSION, Dir)))).
+ rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)))).
segment_find_or_new(Seg, Dir, Segments) ->
case segment_find(Seg, Segments) of
@@ -836,7 +836,7 @@ segment_entries_foldr(Fun, Init,
%%
%% Does not do any combining with the journal at all.
load_segment(KeepAcked, #segment { path = Path }) ->
- case filelib:is_file(Path) of
+ case rabbit_file:is_file(Path) of
false -> {array_new(), 0};
true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []),
{ok, 0} = file_handle_cache:position(Hdl, bof),
@@ -1013,7 +1013,7 @@ add_queue_ttl_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
{[<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>,
MsgId, expiry_to_binary(undefined)], Rest};
add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
- RelSeq:?REL_SEQ_BITS, Rest>>) ->
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
{<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
Rest};
add_queue_ttl_segment(_) ->
@@ -1040,12 +1040,12 @@ foreach_queue_index(Funs) ->
transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
[ok = transform_file(filename:join(Dir, Seg), SegmentFun)
- || Seg <- filelib:wildcard("*" ++ ?SEGMENT_EXTENSION, Dir)],
+ || Seg <- rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)],
ok = gatherer:finish(Gatherer).
transform_file(Path, Fun) ->
PathTmp = Path ++ ".upgrade",
- case filelib:file_size(Path) of
+ case rabbit_file:file_size(Path) of
0 -> ok;
Size -> {ok, PathTmpHdl} =
file_handle_cache:open(PathTmp, ?WRITE_MODE,
@@ -1059,7 +1059,7 @@ transform_file(Path, Fun) ->
ok = drive_transform_fun(Fun, PathTmpHdl, Content),
ok = file_handle_cache:close(PathTmpHdl),
- ok = file:rename(PathTmp, Path)
+ ok = rabbit_file:rename(PathTmp, Path)
end.
drive_transform_fun(Fun, Hdl, Contents) ->
diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl
index dffabf85..b4871cef 100644
--- a/src/rabbit_reader.erl
+++ b/src/rabbit_reader.erl
@@ -18,7 +18,8 @@
-include("rabbit_framing.hrl").
-include("rabbit.hrl").
--export([start_link/3, info_keys/0, info/1, info/2, shutdown/2]).
+-export([start_link/3, info_keys/0, info/1, info/2, force_event_refresh/1,
+ shutdown/2]).
-export([system_continue/3, system_terminate/4, system_code_change/4]).
@@ -28,8 +29,6 @@
-export([process_channel_frame/5]). %% used by erlang-client
--export([emit_stats/1]).
-
-define(HANDSHAKE_TIMEOUT, 10).
-define(NORMAL_TIMEOUT, 3).
-define(CLOSING_TIMEOUT, 1).
@@ -70,7 +69,7 @@
-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(info/1 :: (pid()) -> rabbit_types:infos()).
-spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(emit_stats/1 :: (pid()) -> 'ok').
+-spec(force_event_refresh/1 :: (pid()) -> 'ok').
-spec(shutdown/2 :: (pid(), string()) -> 'ok').
-spec(conserve_memory/2 :: (pid(), boolean()) -> 'ok').
-spec(server_properties/1 :: (rabbit_types:protocol()) ->
@@ -86,6 +85,15 @@
rabbit_types:ok_or_error2(
rabbit_net:socket(), any()))) -> no_return()).
+-spec(mainloop/2 :: (_,#v1{}) -> any()).
+-spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
+-spec(system_continue/3 :: (_,_,#v1{}) -> any()).
+-spec(system_terminate/4 :: (_,_,_,_) -> none()).
+
+-spec(process_channel_frame/5 ::
+ (rabbit_command_assembler:frame(), pid(), non_neg_integer(), pid(),
+ tuple()) -> tuple()).
+
-endif.
%%--------------------------------------------------------------------------
@@ -126,8 +134,8 @@ info(Pid, Items) ->
{error, Error} -> throw(Error)
end.
-emit_stats(Pid) ->
- gen_server:cast(Pid, emit_stats).
+force_event_refresh(Pid) ->
+ gen_server:cast(Pid, force_event_refresh).
conserve_memory(Pid, Conserve) ->
Pid ! {conserve_memory, Conserve},
@@ -323,8 +331,12 @@ handle_other({'$gen_call', From, {info, Items}}, Deb, State) ->
catch Error -> {error, Error}
end),
mainloop(Deb, State);
-handle_other({'$gen_cast', emit_stats}, Deb, State) ->
- mainloop(Deb, internal_emit_stats(State));
+handle_other({'$gen_cast', force_event_refresh}, Deb, State) ->
+ rabbit_event:notify(connection_created,
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State)]),
+ mainloop(Deb, State);
+handle_other(emit_stats, Deb, State) ->
+ mainloop(Deb, emit_stats(State));
handle_other({system, From, Request}, Deb, State = #v1{parent = Parent}) ->
sys:handle_system_msg(Request, From, Parent, ?MODULE, Deb, State);
handle_other(Other, _Deb, _State) ->
@@ -490,20 +502,7 @@ handle_frame(Type, Channel, Payload,
AnalyzedFrame, self(),
Channel, ChPid, FramingState),
put({channel, Channel}, {ChPid, NewAState}),
- case AnalyzedFrame of
- {method, 'channel.close_ok', _} ->
- channel_cleanup(ChPid),
- State;
- {method, MethodName, _} ->
- case (State#v1.connection_state =:= blocking
- andalso
- Protocol:method_has_content(MethodName)) of
- true -> State#v1{connection_state = blocked};
- false -> State
- end;
- _ ->
- State
- end;
+ post_process_frame(AnalyzedFrame, ChPid, State);
undefined ->
case ?IS_RUNNING(State) of
true -> send_to_new_channel(
@@ -515,6 +514,23 @@ handle_frame(Type, Channel, Payload,
end
end.
+post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
+ channel_cleanup(ChPid),
+ State;
+post_process_frame({method, MethodName, _}, _ChPid,
+ State = #v1{connection = #connection{
+ protocol = Protocol}}) ->
+ case Protocol:method_has_content(MethodName) of
+ true -> erlang:bump_reductions(2000),
+ case State#v1.connection_state of
+ blocking -> State#v1{connection_state = blocked};
+ _ -> State
+ end;
+ false -> State
+ end;
+post_process_frame(_Frame, _ChPid, State) ->
+ State.
+
handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32>>, State) ->
ensure_stats_timer(
switch_callback(State, {frame_payload, Type, Channel, PayloadSize},
@@ -591,10 +607,8 @@ refuse_connection(Sock, Exception) ->
ensure_stats_timer(State = #v1{stats_timer = StatsTimer,
connection_state = running}) ->
- Self = self(),
State#v1{stats_timer = rabbit_event:ensure_stats_timer(
- StatsTimer,
- fun() -> emit_stats(Self) end)};
+ StatsTimer, self(), emit_stats)};
ensure_stats_timer(State) ->
State.
@@ -694,7 +708,7 @@ handle_method0(#'connection.open'{virtual_host = VHostPath},
[{type, network} |
infos(?CREATION_EVENT_KEYS, State1)]),
rabbit_event:if_enabled(StatsTimer,
- fun() -> internal_emit_stats(State1) end),
+ fun() -> emit_stats(State1) end),
State1;
handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
@@ -923,6 +937,6 @@ send_exception(State = #v1{connection = #connection{protocol = Protocol}},
State1#v1.sock, 0, CloseMethod, Protocol),
State1.
-internal_emit_stats(State = #v1{stats_timer = StatsTimer}) ->
+emit_stats(State = #v1{stats_timer = StatsTimer}) ->
rabbit_event:notify(connection_stats, infos(?STATISTICS_KEYS, State)),
State#v1{stats_timer = rabbit_event:reset_stats_timer(StatsTimer)}.
diff --git a/src/rabbit_restartable_sup.erl b/src/rabbit_restartable_sup.erl
index 0491244b..cda3ccbe 100644
--- a/src/rabbit_restartable_sup.erl
+++ b/src/rabbit_restartable_sup.erl
@@ -24,6 +24,16 @@
-include("rabbit.hrl").
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/2 :: (atom(), mfa()) -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
start_link(Name, {_M, _F, _A} = Fun) ->
supervisor:start_link({local, Name}, ?MODULE, [Fun]).
diff --git a/src/rabbit_sasl_report_file_h.erl b/src/rabbit_sasl_report_file_h.erl
index 6f3c5c75..963294d9 100644
--- a/src/rabbit_sasl_report_file_h.erl
+++ b/src/rabbit_sasl_report_file_h.erl
@@ -26,12 +26,17 @@
%% with the result of closing the old handler when swapping handlers.
%% The first init/1 additionally allows for simple log rotation
%% when the suffix is not the empty string.
+%% The original init/1 also opened the file in 'write' mode, thus
+%% overwriting old logs. To remedy this, init/1 from
+%% lib/sasl/src/sasl_report_file_h.erl from R14B3 was copied as
+%% init_file/1 and changed so that it opens the file in 'append' mode.
%% Used only when swapping handlers and performing
%% log rotation
init({{File, Suffix}, []}) ->
- case rabbit_misc:append_file(File, Suffix) of
- ok -> ok;
+ case rabbit_file:append_file(File, Suffix) of
+ ok -> file:delete(File),
+ ok;
{error, Error} ->
rabbit_log:error("Failed to append contents of "
"sasl log file '~s' to '~s':~n~p~n",
@@ -47,11 +52,18 @@ init({{File, _}, error}) ->
init({File, []}) ->
init(File);
init({File, _Type} = FileInfo) ->
- rabbit_misc:ensure_parent_dirs_exist(File),
- sasl_report_file_h:init(FileInfo);
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file(FileInfo);
init(File) ->
- rabbit_misc:ensure_parent_dirs_exist(File),
- sasl_report_file_h:init({File, sasl_error_logger_type()}).
+ rabbit_file:ensure_parent_dirs_exist(File),
+ init_file({File, sasl_error_logger_type()}).
+
+init_file({File, Type}) ->
+ process_flag(trap_exit, true),
+ case file:open(File, [append]) of
+ {ok,Fd} -> {ok, {Fd, File, Type}};
+ Error -> Error
+ end.
handle_event(Event, State) ->
sasl_report_file_h:handle_event(Event, State).
diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl
index 508b127e..802ea5e2 100644
--- a/src/rabbit_sup.erl
+++ b/src/rabbit_sup.erl
@@ -27,6 +27,21 @@
-define(SERVER, ?MODULE).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_child/1 :: (atom()) -> 'ok').
+-spec(start_child/3 :: (atom(), atom(), [any()]) -> 'ok').
+-spec(start_restartable_child/1 :: (atom()) -> 'ok').
+-spec(start_restartable_child/2 :: (atom(), [any()]) -> 'ok').
+-spec(stop_child/1 :: (atom()) -> rabbit_types:ok_or_error(any())).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []).
diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl
index f2b6810e..da475037 100644
--- a/src/rabbit_tests.erl
+++ b/src/rabbit_tests.erl
@@ -20,6 +20,8 @@
-export([all_tests/0, test_parsing/0]).
+-import(rabbit_misc, [pget/2]).
+
-include("rabbit.hrl").
-include("rabbit_framing.hrl").
-include_lib("kernel/include/file.hrl").
@@ -36,6 +38,7 @@ test_content_prop_roundtrip(Datum, Binary) ->
all_tests() ->
passed = gm_tests:all_tests(),
+ passed = mirrored_supervisor_tests:all_tests(),
application:set_env(rabbit, file_handles_high_watermark, 10, infinity),
ok = file_handle_cache:set_limit(10),
passed = test_file_handle_cache(),
@@ -85,6 +88,7 @@ run_cluster_dependent_tests(SecondaryNode) ->
passed = test_delegates_sync(SecondaryNode),
passed = test_queue_cleanup(SecondaryNode),
passed = test_declare_on_dead_queue(SecondaryNode),
+ passed = test_refresh_events(SecondaryNode),
%% we now run the tests remotely, so that code coverage on the
%% local node picks up more of the delegate
@@ -94,7 +98,8 @@ run_cluster_dependent_tests(SecondaryNode) ->
fun () -> Rs = [ test_delegates_async(Node),
test_delegates_sync(Node),
test_queue_cleanup(Node),
- test_declare_on_dead_queue(Node) ],
+ test_declare_on_dead_queue(Node),
+ test_refresh_events(Node) ],
Self ! {self(), Rs}
end),
receive
@@ -203,6 +208,42 @@ test_priority_queue() ->
{true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
test_priority_queue(Q15),
+ %% 1-element infinity priority Q
+ Q16 = priority_queue:in(foo, infinity, Q),
+ {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
+
+ %% add infinity to 0-priority Q
+ Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q17),
+
+ %% and the other way around
+ Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q18),
+
+ %% add infinity to mixed-priority Q
+ Q19 = priority_queue:in(qux, infinity, Q3),
+ {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
+ test_priority_queue(Q19),
+
+ %% merge the above with a negative priority Q
+ Q20 = priority_queue:join(Q19, Q4),
+ {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
+ [qux, bar, foo, foo]} = test_priority_queue(Q20),
+
+ %% merge two infinity priority queues
+ Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
+ priority_queue:in(bar, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
+ test_priority_queue(Q21),
+
+ %% merge two mixed priority with infinity queues
+ Q22 = priority_queue:join(Q18, Q20),
+ {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
+ {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
+ test_priority_queue(Q22),
+
passed.
priority_queue_in_all(Q, L) ->
@@ -716,13 +757,23 @@ test_topic_expect_match(X, List) ->
end, List).
test_app_management() ->
- %% starting, stopping, status
+ control_action(wait, [rabbit_mnesia:dir() ++ ".pid"]),
+ %% Starting, stopping and diagnostics. Note that we don't try
+ %% 'report' when the rabbit app is stopped and that we enable
+ %% tracing for the duration of this function.
+ ok = control_action(trace_on, []),
ok = control_action(stop_app, []),
ok = control_action(stop_app, []),
ok = control_action(status, []),
+ ok = control_action(cluster_status, []),
+ ok = control_action(environment, []),
ok = control_action(start_app, []),
ok = control_action(start_app, []),
ok = control_action(status, []),
+ ok = control_action(report, []),
+ ok = control_action(cluster_status, []),
+ ok = control_action(environment, []),
+ ok = control_action(trace_off, []),
passed.
test_log_management() ->
@@ -754,23 +805,11 @@ test_log_management() ->
ok = control_action(rotate_logs, []),
ok = test_logs_working(MainLog, SaslLog),
- %% log rotation on empty file
+ %% log rotation on empty files (the main log will have a ctl action logged)
ok = clean_logs([MainLog, SaslLog], Suffix),
ok = control_action(rotate_logs, []),
ok = control_action(rotate_logs, [Suffix]),
- [true, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
-
- %% original main log file is not writable
- ok = make_files_non_writable([MainLog]),
- {error, {cannot_rotate_main_logs, _}} = control_action(rotate_logs, []),
- ok = clean_logs([MainLog], Suffix),
- ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog}]),
-
- %% original sasl log file is not writable
- ok = make_files_non_writable([SaslLog]),
- {error, {cannot_rotate_sasl_logs, _}} = control_action(rotate_logs, []),
- ok = clean_logs([SaslLog], Suffix),
- ok = add_log_handlers([{rabbit_sasl_report_file_h, SaslLog}]),
+ [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
%% logs with suffix are not writable
ok = control_action(rotate_logs, [Suffix]),
@@ -778,27 +817,28 @@ test_log_management() ->
ok = control_action(rotate_logs, [Suffix]),
ok = test_logs_working(MainLog, SaslLog),
- %% original log files are not writable
+ %% rotate when original log files are not writable
ok = make_files_non_writable([MainLog, SaslLog]),
- {error, {{cannot_rotate_main_logs, _},
- {cannot_rotate_sasl_logs, _}}} = control_action(rotate_logs, []),
+ ok = control_action(rotate_logs, []),
- %% logging directed to tty (handlers were removed in last test)
+ %% logging directed to tty (first, remove handlers)
+ ok = delete_log_handlers([rabbit_sasl_report_file_h,
+ rabbit_error_logger_file_h]),
ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = application:set_env(sasl, sasl_error_logger, tty),
- ok = application:set_env(kernel, error_logger, tty),
+ ok = application:set_env(rabbit, sasl_error_logger, tty),
+ ok = application:set_env(rabbit, error_logger, tty),
ok = control_action(rotate_logs, []),
[{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
%% rotate logs when logging is turned off
- ok = application:set_env(sasl, sasl_error_logger, false),
- ok = application:set_env(kernel, error_logger, silent),
+ ok = application:set_env(rabbit, sasl_error_logger, false),
+ ok = application:set_env(rabbit, error_logger, silent),
ok = control_action(rotate_logs, []),
[{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
%% cleanup
- ok = application:set_env(sasl, sasl_error_logger, {file, SaslLog}),
- ok = application:set_env(kernel, error_logger, {file, MainLog}),
+ ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
+ ok = application:set_env(rabbit, error_logger, {file, MainLog}),
ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
{rabbit_sasl_report_file_h, SaslLog}]),
passed.
@@ -809,8 +849,8 @@ test_log_management_during_startup() ->
%% start application with simple tty logging
ok = control_action(stop_app, []),
- ok = application:set_env(kernel, error_logger, tty),
- ok = application:set_env(sasl, sasl_error_logger, tty),
+ ok = application:set_env(rabbit, error_logger, tty),
+ ok = application:set_env(rabbit, sasl_error_logger, tty),
ok = add_log_handlers([{error_logger_tty_h, []},
{sasl_report_tty_h, []}]),
ok = control_action(start_app, []),
@@ -827,13 +867,12 @@ test_log_management_during_startup() ->
end,
%% fix sasl logging
- ok = application:set_env(sasl, sasl_error_logger,
- {file, SaslLog}),
+ ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
%% start application with logging to non-existing directory
TmpLog = "/tmp/rabbit-tests/test.log",
delete_file(TmpLog),
- ok = application:set_env(kernel, error_logger, {file, TmpLog}),
+ ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
ok = delete_log_handlers([rabbit_error_logger_file_h]),
ok = add_log_handlers([{error_logger_file_h, MainLog}]),
@@ -854,7 +893,7 @@ test_log_management_during_startup() ->
%% start application with logging to a subdirectory which
%% parent directory has no write permissions
TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
- ok = application:set_env(kernel, error_logger, {file, TmpTestDir}),
+ ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
ok = add_log_handlers([{error_logger_file_h, MainLog}]),
ok = case control_action(start_app, []) of
ok -> exit({got_success_but_expected_failure,
@@ -869,7 +908,7 @@ test_log_management_during_startup() ->
%% start application with standard error_logger_file_h
%% handler not installed
- ok = application:set_env(kernel, error_logger, {file, MainLog}),
+ ok = application:set_env(rabbit, error_logger, {file, MainLog}),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
@@ -904,7 +943,6 @@ test_option_parser() ->
passed.
test_cluster_management() ->
-
%% 'cluster' and 'reset' should only work if the app is stopped
{error, _} = control_action(cluster, []),
{error, _} = control_action(reset, []),
@@ -952,13 +990,16 @@ test_cluster_management() ->
ok = control_action(reset, []),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_disc_node(),
ok = control_action(force_cluster, ["invalid1@invalid",
"invalid2@invalid"]),
+ ok = assert_ram_node(),
%% join a non-existing cluster as a ram node
ok = control_action(reset, []),
ok = control_action(force_cluster, ["invalid1@invalid",
"invalid2@invalid"]),
+ ok = assert_ram_node(),
SecondaryNode = rabbit_misc:makenode("hare"),
case net_adm:ping(SecondaryNode) of
@@ -977,15 +1018,18 @@ test_cluster_management2(SecondaryNode) ->
%% make a disk node
ok = control_action(reset, []),
ok = control_action(cluster, [NodeS]),
+ ok = assert_disc_node(),
%% make a ram node
ok = control_action(reset, []),
ok = control_action(cluster, [SecondaryNodeS]),
+ ok = assert_ram_node(),
%% join cluster as a ram node
ok = control_action(reset, []),
ok = control_action(force_cluster, [SecondaryNodeS, "invalid1@invalid"]),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_ram_node(),
%% change cluster config while remaining in same cluster
ok = control_action(force_cluster, ["invalid2@invalid", SecondaryNodeS]),
@@ -997,27 +1041,45 @@ test_cluster_management2(SecondaryNode) ->
"invalid2@invalid"]),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_ram_node(),
- %% join empty cluster as a ram node
+ %% join empty cluster as a ram node (converts to disc)
ok = control_action(cluster, []),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_disc_node(),
- %% turn ram node into disk node
+ %% make a new ram node
ok = control_action(reset, []),
+ ok = control_action(force_cluster, [SecondaryNodeS]),
+ ok = control_action(start_app, []),
+ ok = control_action(stop_app, []),
+ ok = assert_ram_node(),
+
+ %% turn ram node into disk node
ok = control_action(cluster, [SecondaryNodeS, NodeS]),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_disc_node(),
%% convert a disk node into a ram node
+ ok = assert_disc_node(),
ok = control_action(force_cluster, ["invalid1@invalid",
"invalid2@invalid"]),
+ ok = assert_ram_node(),
+
+ %% make a new disk node
+ ok = control_action(force_reset, []),
+ ok = control_action(start_app, []),
+ ok = control_action(stop_app, []),
+ ok = assert_disc_node(),
%% turn a disk node into a ram node
ok = control_action(reset, []),
ok = control_action(cluster, [SecondaryNodeS]),
ok = control_action(start_app, []),
ok = control_action(stop_app, []),
+ ok = assert_ram_node(),
%% NB: this will log an inconsistent_database error, which is harmless
%% Turning cover on / off is OK even if we're not in general using cover,
@@ -1043,6 +1105,10 @@ test_cluster_management2(SecondaryNode) ->
{error, {no_running_cluster_nodes, _, _}} =
control_action(reset, []),
+ %% attempt to change type when no other node is alive
+ {error, {no_running_cluster_nodes, _, _}} =
+ control_action(cluster, [SecondaryNodeS]),
+
%% leave system clustered, with the secondary node as a ram node
ok = control_action(force_reset, []),
ok = control_action(start_app, []),
@@ -1078,6 +1144,7 @@ test_user_management() ->
ok = control_action(add_user, ["foo", "bar"]),
{error, {user_already_exists, _}} =
control_action(add_user, ["foo", "bar"]),
+ ok = control_action(clear_password, ["foo"]),
ok = control_action(change_password, ["foo", "baz"]),
TestTags = fun (Tags) ->
@@ -1136,15 +1203,16 @@ test_server_status() ->
{ok, Ch} = rabbit_channel:start_link(
1, self(), Writer, self(), rabbit_framing_amqp_0_9_1,
user(<<"user">>), <<"/">>, [], self(),
- fun (_) -> {ok, self()} end),
+ rabbit_limiter:make_token(self())),
[Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>],
{new, Queue = #amqqueue{}} <-
[rabbit_amqqueue:declare(
rabbit_misc:r(<<"/">>, queue, Name),
false, false, [], none)]],
- ok = rabbit_amqqueue:basic_consume(Q, true, Ch, undefined,
- <<"ctag">>, true, undefined),
+ ok = rabbit_amqqueue:basic_consume(
+ Q, true, Ch, rabbit_limiter:make_token(),
+ <<"ctag">>, true, undefined),
%% list queues
ok = info_action(list_queues, rabbit_amqqueue:info_keys(), true),
@@ -1202,14 +1270,34 @@ test_spawn() ->
Writer = spawn(fun () -> test_writer(Me) end),
{ok, Ch} = rabbit_channel:start_link(
1, Me, Writer, Me, rabbit_framing_amqp_0_9_1,
- user(<<"guest">>), <<"/">>, [], self(),
- fun (_) -> {ok, self()} end),
+ user(<<"guest">>), <<"/">>, [], Me,
+ rabbit_limiter:make_token(self())),
ok = rabbit_channel:do(Ch, #'channel.open'{}),
receive #'channel.open_ok'{} -> ok
after 1000 -> throw(failed_to_receive_channel_open_ok)
end,
{Writer, Ch}.
+test_spawn(Node) ->
+ rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long lived process, so we don't end up linking
+%% the channel to the short-lived process (RPC, here) spun up by the
+%% RPC server.
+test_spawn_remote() ->
+ RPC = self(),
+ spawn(fun () ->
+ {Writer, Ch} = test_spawn(),
+ RPC ! {Writer, Ch},
+ link(Ch),
+ receive
+ _ -> ok
+ end
+ end),
+ receive Res -> Res
+ after 1000 -> throw(failed_to_receive_result)
+ end.
+
user(Username) ->
#user{username = Username,
tags = [administrator],
@@ -1217,25 +1305,6 @@ user(Username) ->
impl = #internal_user{username = Username,
tags = [administrator]}}.
-test_statistics_event_receiver(Pid) ->
- receive
- Foo -> Pid ! Foo, test_statistics_event_receiver(Pid)
- end.
-
-test_statistics_receive_event(Ch, Matcher) ->
- rabbit_channel:flush(Ch),
- rabbit_channel:emit_stats(Ch),
- test_statistics_receive_event1(Ch, Matcher).
-
-test_statistics_receive_event1(Ch, Matcher) ->
- receive #event{type = channel_stats, props = Props} ->
- case Matcher(Props) of
- true -> Props;
- _ -> test_statistics_receive_event1(Ch, Matcher)
- end
- after 1000 -> throw(failed_to_receive_event)
- end.
-
test_confirms() ->
{_Writer, Ch} = test_spawn(),
DeclareBindDurableQueue =
@@ -1296,6 +1365,25 @@ test_confirms() ->
passed.
+test_statistics_event_receiver(Pid) ->
+ receive
+ Foo -> Pid ! Foo, test_statistics_event_receiver(Pid)
+ end.
+
+test_statistics_receive_event(Ch, Matcher) ->
+ rabbit_channel:flush(Ch),
+ Ch ! emit_stats,
+ test_statistics_receive_event1(Ch, Matcher).
+
+test_statistics_receive_event1(Ch, Matcher) ->
+ receive #event{type = channel_stats, props = Props} ->
+ case Matcher(Props) of
+ true -> Props;
+ _ -> test_statistics_receive_event1(Ch, Matcher)
+ end
+ after 1000 -> throw(failed_to_receive_event)
+ end.
+
test_statistics() ->
application:set_env(rabbit, collect_statistics, fine),
@@ -1313,7 +1401,7 @@ test_statistics() ->
QPid = Q#amqqueue.pid,
X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
- rabbit_tests_event_receiver:start(self()),
+ rabbit_tests_event_receiver:start(self(), [node()], [channel_stats]),
%% Check stats empty
Event = test_statistics_receive_event(Ch, fun (_) -> true end),
@@ -1356,6 +1444,40 @@ test_statistics() ->
rabbit_tests_event_receiver:stop(),
passed.
+test_refresh_events(SecondaryNode) ->
+ rabbit_tests_event_receiver:start(self(), [node(), SecondaryNode],
+ [channel_created, queue_created]),
+
+ {_Writer, Ch} = test_spawn(),
+ expect_events(Ch, channel_created),
+ rabbit_channel:shutdown(Ch),
+
+ {_Writer2, Ch2} = test_spawn(SecondaryNode),
+ expect_events(Ch2, channel_created),
+ rabbit_channel:shutdown(Ch2),
+
+ {new, #amqqueue { pid = QPid } = Q} =
+ rabbit_amqqueue:declare(test_queue(), false, false, [], none),
+ expect_events(QPid, queue_created),
+ rabbit_amqqueue:delete(Q, false, false),
+
+ rabbit_tests_event_receiver:stop(),
+ passed.
+
+expect_events(Pid, Type) ->
+ expect_event(Pid, Type),
+ rabbit:force_event_refresh(),
+ expect_event(Pid, Type).
+
+expect_event(Pid, Type) ->
+ receive #event{type = Type, props = Props} ->
+ case pget(pid, Props) of
+ Pid -> ok;
+ _ -> expect_event(Pid, Type)
+ end
+ after 1000 -> throw({failed_to_receive_event, Type})
+ end.
+
test_delegates_async(SecondaryNode) ->
Self = self(),
Sender = fun (Pid) -> Pid ! {invoked, Self} end,
@@ -1461,16 +1583,19 @@ test_queue_cleanup(_SecondaryNode) ->
ok
after 1000 -> throw(failed_to_receive_queue_declare_ok)
end,
+ rabbit_channel:shutdown(Ch),
rabbit:stop(),
rabbit:start(),
- rabbit_channel:do(Ch, #'queue.declare'{ passive = true,
- queue = ?CLEANUP_QUEUE_NAME }),
+ {_Writer2, Ch2} = test_spawn(),
+ rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+ queue = ?CLEANUP_QUEUE_NAME }),
receive
#'channel.close'{reply_code = ?NOT_FOUND} ->
ok
after 2000 ->
throw(failed_to_receive_channel_exit)
end,
+ rabbit_channel:shutdown(Ch2),
passed.
test_declare_on_dead_queue(SecondaryNode) ->
@@ -1581,6 +1706,18 @@ clean_logs(Files, Suffix) ->
end || File <- Files],
ok.
+assert_ram_node() ->
+ case rabbit_mnesia:is_disc_node() of
+ true -> exit('not_ram_node');
+ false -> ok
+ end.
+
+assert_disc_node() ->
+ case rabbit_mnesia:is_disc_node() of
+ true -> ok;
+ false -> exit('not_disc_node')
+ end.
+
delete_file(File) ->
case file:delete(File) of
ok -> ok;
@@ -1616,7 +1753,11 @@ test_file_handle_cache() ->
[filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
Content = <<"foo">>,
CopyFun = fun (Src, Dst) ->
- ok = rabbit_misc:write_file(Src, Content),
+ {ok, Hdl} = prim_file:open(Src, [binary, write]),
+ ok = prim_file:write(Hdl, Content),
+ ok = prim_file:sync(Hdl),
+ prim_file:close(Hdl),
+
{ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
{ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
Size = size(Content),
@@ -1692,25 +1833,49 @@ msg_id_bin(X) ->
msg_store_client_init(MsgStore, Ref) ->
rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
+on_disk_capture() ->
+ on_disk_capture({gb_sets:new(), gb_sets:new(), undefined}).
+on_disk_capture({OnDisk, Awaiting, Pid}) ->
+ Pid1 = case Pid =/= undefined andalso gb_sets:is_empty(Awaiting) of
+ true -> Pid ! {self(), arrived}, undefined;
+ false -> Pid
+ end,
+ receive
+ {await, MsgIds, Pid2} ->
+ true = Pid1 =:= undefined andalso gb_sets:is_empty(Awaiting),
+ on_disk_capture({OnDisk, gb_sets:subtract(MsgIds, OnDisk), Pid2});
+ {on_disk, MsgIds} ->
+ on_disk_capture({gb_sets:union(OnDisk, MsgIds),
+ gb_sets:subtract(Awaiting, MsgIds),
+ Pid1});
+ stop ->
+ done
+ end.
+
+on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
+ Pid ! {await, gb_sets:from_list(MsgIds), self()},
+ receive {Pid, arrived} -> ok end.
+
+on_disk_stop(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ Pid ! stop,
+ receive {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end.
+
+msg_store_client_init_capture(MsgStore, Ref) ->
+ Pid = spawn(fun on_disk_capture/0),
+ {Pid, rabbit_msg_store:client_init(
+ MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
+ Pid ! {on_disk, MsgIds}
+ end, undefined)}.
+
msg_store_contains(Atom, MsgIds, MSCState) ->
Atom = lists:foldl(
fun (MsgId, Atom1) when Atom1 =:= Atom ->
rabbit_msg_store:contains(MsgId, MSCState) end,
Atom, MsgIds).
-msg_store_sync(MsgIds, MSCState) ->
- Ref = make_ref(),
- Self = self(),
- ok = rabbit_msg_store:sync(MsgIds, fun () -> Self ! {sync, Ref} end,
- MSCState),
- receive
- {sync, Ref} -> ok
- after
- 10000 ->
- io:format("Sync from msg_store missing for msg_ids ~p~n", [MsgIds]),
- throw(timeout)
- end.
-
msg_store_read(MsgIds, MSCState) ->
lists:foldl(fun (MsgId, MSCStateM) ->
{{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
@@ -1744,22 +1909,18 @@ foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
test_msg_store() ->
restart_msg_store_empty(),
- Self = self(),
MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
{MsgIds1stHalf, MsgIds2ndHalf} = lists:split(50, MsgIds),
Ref = rabbit_guid:guid(),
- MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ {Cap, MSCState} = msg_store_client_init_capture(?PERSISTENT_MSG_STORE, Ref),
%% check we don't contain any of the msgs we're about to publish
false = msg_store_contains(false, MsgIds, MSCState),
%% publish the first half
ok = msg_store_write(MsgIds1stHalf, MSCState),
%% sync on the first half
- ok = msg_store_sync(MsgIds1stHalf, MSCState),
+ ok = on_disk_await(Cap, MsgIds1stHalf),
%% publish the second half
ok = msg_store_write(MsgIds2ndHalf, MSCState),
- %% sync on the first half again - the msg_store will be dirty, but
- %% we won't need the fsync
- ok = msg_store_sync(MsgIds1stHalf, MSCState),
%% check they're all in there
true = msg_store_contains(true, MsgIds, MSCState),
%% publish the latter half twice so we hit the caching and ref count code
@@ -1768,25 +1929,8 @@ test_msg_store() ->
true = msg_store_contains(true, MsgIds, MSCState),
%% sync on the 2nd half, but do lots of individual syncs to try
%% and cause coalescing to happen
- ok = lists:foldl(
- fun (MsgId, ok) -> rabbit_msg_store:sync(
- [MsgId], fun () -> Self ! {sync, MsgId} end,
- MSCState)
- end, ok, MsgIds2ndHalf),
- lists:foldl(
- fun(MsgId, ok) ->
- receive
- {sync, MsgId} -> ok
- after
- 10000 ->
- io:format("Sync from msg_store missing (msg_id: ~p)~n",
- [MsgId]),
- throw(timeout)
- end
- end, ok, MsgIds2ndHalf),
- %% it's very likely we're not dirty here, so the 1st half sync
- %% should hit a different code path
- ok = msg_store_sync(MsgIds1stHalf, MSCState),
+ ok = on_disk_await(Cap, MsgIds2ndHalf),
+ ok = on_disk_stop(Cap),
%% read them all
MSCState1 = msg_store_read(MsgIds, MSCState),
%% read them all again - this will hit the cache, not disk
diff --git a/src/rabbit_tests_event_receiver.erl b/src/rabbit_tests_event_receiver.erl
index 12c43faf..abcbe0b6 100644
--- a/src/rabbit_tests_event_receiver.erl
+++ b/src/rabbit_tests_event_receiver.erl
@@ -16,36 +16,43 @@
-module(rabbit_tests_event_receiver).
--export([start/1, stop/0]).
+-export([start/3, stop/0]).
-export([init/1, handle_call/2, handle_event/2, handle_info/2,
terminate/2, code_change/3]).
-start(Pid) ->
- gen_event:add_handler(rabbit_event, ?MODULE, [Pid]).
+-include("rabbit.hrl").
+
+start(Pid, Nodes, Types) ->
+ Oks = [ok || _ <- Nodes],
+ {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
+ [rabbit_event, ?MODULE, [Pid, Types]]).
stop() ->
gen_event:delete_handler(rabbit_event, ?MODULE, []).
%%----------------------------------------------------------------------------
-init([Pid]) ->
- {ok, Pid}.
+init([Pid, Types]) ->
+ {ok, {Pid, Types}}.
-handle_call(_Request, Pid) ->
- {ok, not_understood, Pid}.
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
-handle_event(Event, Pid) ->
- Pid ! Event,
- {ok, Pid}.
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+ case lists:member(Type, Types) of
+ true -> Pid ! Event;
+ false -> ok
+ end,
+ {ok, State}.
-handle_info(_Info, Pid) ->
- {ok, Pid}.
+handle_info(_Info, State) ->
+ {ok, State}.
-terminate(_Arg, _Pid) ->
+terminate(_Arg, _State) ->
ok.
-code_change(_OldVsn, Pid, _Extra) ->
- {ok, Pid}.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
%%----------------------------------------------------------------------------
diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl
index 7d36856a..58079ccf 100644
--- a/src/rabbit_trace.erl
+++ b/src/rabbit_trace.erl
@@ -67,16 +67,18 @@ tap_trace_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg},
%%----------------------------------------------------------------------------
start(VHost) ->
+ rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
stop(VHost) ->
+ rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
update_config(fun (VHosts) -> VHosts -- [VHost] end).
update_config(Fun) ->
{ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS),
VHosts = Fun(VHosts0),
application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
- rabbit_channel:refresh_config_all(),
+ rabbit_channel:refresh_config_local(),
ok.
%%----------------------------------------------------------------------------
diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl
index a2abb1e5..717d94a8 100644
--- a/src/rabbit_upgrade.erl
+++ b/src/rabbit_upgrade.erl
@@ -115,7 +115,7 @@ ensure_backup_removed() ->
end.
remove_backup() ->
- ok = rabbit_misc:recursive_delete([backup_dir()]),
+ ok = rabbit_file:recursive_delete([backup_dir()]),
info("upgrades: Mnesia backup removed~n", []).
maybe_upgrade_mnesia() ->
@@ -144,7 +144,7 @@ upgrade_mode(AllNodes) ->
case nodes_running(AllNodes) of
[] ->
AfterUs = rabbit_mnesia:read_previously_running_nodes(),
- case {is_disc_node(), AfterUs} of
+ case {is_disc_node_legacy(), AfterUs} of
{true, []} ->
primary;
{true, _} ->
@@ -182,12 +182,6 @@ upgrade_mode(AllNodes) ->
end
end.
-is_disc_node() ->
- %% This is pretty ugly but we can't start Mnesia and ask it (will hang),
- %% we can't look at the config file (may not include us even if we're a
- %% disc node).
- filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")).
-
die(Msg, Args) ->
%% We don't throw or exit here since that gets thrown
%% straight out into do_boot, generating an erl_crash.dump
@@ -218,7 +212,7 @@ force_tables() ->
secondary_upgrade(AllNodes) ->
%% must do this before we wipe out schema
- IsDiscNode = is_disc_node(),
+ IsDiscNode = is_disc_node_legacy(),
rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
cannot_delete_schema),
%% Note that we cluster with all nodes, rather than all disc nodes
@@ -234,13 +228,7 @@ secondary_upgrade(AllNodes) ->
ok.
nodes_running(Nodes) ->
- [N || N <- Nodes, node_running(N)].
-
-node_running(Node) ->
- case rpc:call(Node, application, which_applications, []) of
- {badrpc, _} -> false;
- Apps -> lists:keysearch(rabbit, 1, Apps) =/= false
- end.
+ [N || N <- Nodes, rabbit:is_running(N)].
%% -------------------------------------------------------------------
@@ -261,7 +249,7 @@ maybe_upgrade_local() ->
%% -------------------------------------------------------------------
apply_upgrades(Scope, Upgrades, Fun) ->
- ok = rabbit_misc:lock_file(lock_filename()),
+ ok = rabbit_file:lock_file(lock_filename()),
info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
Fun(),
@@ -282,6 +270,14 @@ lock_filename() -> lock_filename(dir()).
lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME).
backup_dir() -> dir() ++ "-upgrade-backup".
+is_disc_node_legacy() ->
+ %% This is pretty ugly but we can't start Mnesia and ask it (will
+ %% hang), we can't look at the config file (may not include us
+ %% even if we're a disc node). We also can't use
+ %% rabbit_mnesia:is_disc_node/0 because that will give false
+%% positives on Rabbit up to 2.5.1.
+ filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")).
+
%% NB: we cannot use rabbit_log here since it may not have been
%% started yet
info(Msg, Args) -> error_logger:info_msg(Msg, Args).
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl
index 8d26866b..e0ca8cbb 100644
--- a/src/rabbit_upgrade_functions.erl
+++ b/src/rabbit_upgrade_functions.erl
@@ -34,6 +34,7 @@
-rabbit_upgrade({ha_mirrors, mnesia, []}).
-rabbit_upgrade({gm, mnesia, []}).
-rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}).
+-rabbit_upgrade({mirrored_supervisor, mnesia, []}).
%% -------------------------------------------------------------------
@@ -52,6 +53,7 @@
-spec(ha_mirrors/0 :: () -> 'ok').
-spec(gm/0 :: () -> 'ok').
-spec(exchange_scratch/0 :: () -> 'ok').
+-spec(mirrored_supervisor/0 :: () -> 'ok').
-endif.
@@ -170,6 +172,11 @@ exchange_scratch(Table) ->
end,
[name, type, durable, auto_delete, internal, arguments, scratch]).
+mirrored_supervisor() ->
+ create(mirrored_sup_childspec,
+ [{record_name, mirrored_sup_childspec},
+ {attributes, [key, mirroring_pid, childspec]}]).
+
%%--------------------------------------------------------------------
transform(TableName, Fun, FieldList) ->
diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl
index 400abc10..f6bcbb7f 100644
--- a/src/rabbit_version.erl
+++ b/src/rabbit_version.erl
@@ -49,12 +49,12 @@
%% -------------------------------------------------------------------
-recorded() -> case rabbit_misc:read_term_file(schema_filename()) of
+recorded() -> case rabbit_file:read_term_file(schema_filename()) of
{ok, [V]} -> {ok, V};
{error, _} = Err -> Err
end.
-record(V) -> ok = rabbit_misc:write_term_file(schema_filename(), [V]).
+record(V) -> ok = rabbit_file:write_term_file(schema_filename(), [V]).
recorded_for_scope(Scope) ->
case recorded() of
diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl
index 08d6c99a..38bb76b0 100644
--- a/src/rabbit_vhost.erl
+++ b/src/rabbit_vhost.erl
@@ -44,6 +44,7 @@
-define(INFO_KEYS, [name, tracing]).
add(VHostPath) ->
+ rabbit_log:info("Adding vhost '~s'~n", [VHostPath]),
R = rabbit_misc:execute_mnesia_transaction(
fun () ->
case mnesia:wread({rabbit_vhost, VHostPath}) of
@@ -69,7 +70,6 @@ add(VHostPath) ->
{<<"amq.rabbitmq.trace">>, topic}]],
ok
end),
- rabbit_log:info("Added vhost ~p~n", [VHostPath]),
R.
delete(VHostPath) ->
@@ -78,6 +78,7 @@ delete(VHostPath) ->
%% process, which in turn results in further mnesia actions and
%% eventually the termination of that process. Exchange deletion causes
%% notifications which must be sent outside the TX
+ rabbit_log:info("Deleting vhost '~s'~n", [VHostPath]),
[{ok,_} = rabbit_amqqueue:delete(Q, false, false) ||
Q <- rabbit_amqqueue:list(VHostPath)],
[ok = rabbit_exchange:delete(Name, false) ||
@@ -86,7 +87,6 @@ delete(VHostPath) ->
with(VHostPath, fun () ->
ok = internal_delete(VHostPath)
end)),
- rabbit_log:info("Deleted vhost ~p~n", [VHostPath]),
R.
internal_delete(VHostPath) ->
diff --git a/src/rabbit_writer.erl b/src/rabbit_writer.erl
index ac3434d2..091b50e4 100644
--- a/src/rabbit_writer.erl
+++ b/src/rabbit_writer.erl
@@ -67,6 +67,9 @@
non_neg_integer(), rabbit_types:protocol())
-> 'ok').
+-spec(mainloop/2 :: (_,_) -> 'done').
+-spec(mainloop1/2 :: (_,_) -> any()).
+
-endif.
%%---------------------------------------------------------------------------
diff --git a/src/supervisor2.erl b/src/supervisor2.erl
index ec1ee9cd..405949ef 100644
--- a/src/supervisor2.erl
+++ b/src/supervisor2.erl
@@ -76,7 +76,6 @@
%% Internal exports
-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3]).
-export([handle_cast/2]).
--export([delayed_restart/2]).
-define(DICT, dict).
@@ -157,9 +156,6 @@ check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
end;
check_childspecs(X) -> {error, {badarg, X}}.
-delayed_restart(Supervisor, RestartDetails) ->
- gen_server:cast(Supervisor, {delayed_restart, RestartDetails}).
-
%%% ---------------------------------------------------
%%%
%%% Initialize the supervisor.
@@ -355,12 +351,19 @@ handle_call(which_children, _From, State) ->
State#state.children),
{reply, Resp, State}.
+%%% Hopefully cause a function-clause as there is no API function
+%%% that utilizes cast.
+handle_cast(null, State) ->
+ error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n",
+ []),
+
+ {noreply, State}.
-handle_cast({delayed_restart, {RestartType, Reason, Child}}, State)
+handle_info({delayed_restart, {RestartType, Reason, Child}}, State)
when ?is_simple(State) ->
{ok, NState} = do_restart(RestartType, Reason, Child, State),
{noreply, NState};
-handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) ->
+handle_info({delayed_restart, {RestartType, Reason, Child}}, State) ->
case get_child(Child#child.name, State) of
{value, Child1} ->
{ok, NState} = do_restart(RestartType, Reason, Child1, State),
@@ -369,14 +372,6 @@ handle_cast({delayed_restart, {RestartType, Reason, Child}}, State) ->
{noreply, State}
end;
-%%% Hopefully cause a function-clause as there is no API function
-%%% that utilizes cast.
-handle_cast(null, State) ->
- error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n",
- []),
-
- {noreply, State}.
-
%%
%% Take care of terminated children.
%%
@@ -539,9 +534,9 @@ do_restart({RestartType, Delay}, Reason, Child, State) ->
{ok, NState} ->
{ok, NState};
{terminate, NState} ->
- {ok, _TRef} = timer:apply_after(
- trunc(Delay*1000), ?MODULE, delayed_restart,
- [self(), {{RestartType, Delay}, Reason, Child}]),
+ _TRef = erlang:send_after(trunc(Delay*1000), self(),
+ {delayed_restart,
+ {{RestartType, Delay}, Reason, Child}}),
{ok, state_del_child(Child, NState)}
end;
do_restart(permanent, Reason, Child, State) ->
diff --git a/src/tcp_acceptor_sup.erl b/src/tcp_acceptor_sup.erl
index bf0eacd1..4c835598 100644
--- a/src/tcp_acceptor_sup.erl
+++ b/src/tcp_acceptor_sup.erl
@@ -22,6 +22,14 @@
-export([init/1]).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+-spec(start_link/2 :: (atom(), mfa()) -> rabbit_types:ok_pid_or_error()).
+-endif.
+
+%%----------------------------------------------------------------------------
+
start_link(Name, Callback) ->
supervisor:start_link({local,Name}, ?MODULE, Callback).
diff --git a/src/tcp_listener.erl b/src/tcp_listener.erl
index cd646969..ad2a0d02 100644
--- a/src/tcp_listener.erl
+++ b/src/tcp_listener.erl
@@ -25,6 +25,14 @@
-record(state, {sock, on_startup, on_shutdown, label}).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+-spec(start_link/8 ::
+ (gen_tcp:ip_address(), integer(), rabbit_types:infos(), integer(),
+ atom(), mfa(), mfa(), string()) -> rabbit_types:ok_pid_or_error()).
+-endif.
+
%%--------------------------------------------------------------------
start_link(IPAddress, Port, SocketOpts,
diff --git a/src/tcp_listener_sup.erl b/src/tcp_listener_sup.erl
index 58c2f30c..5bff5c27 100644
--- a/src/tcp_listener_sup.erl
+++ b/src/tcp_listener_sup.erl
@@ -22,6 +22,21 @@
-export([init/1]).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/7 ::
+ (gen_tcp:ip_address(), integer(), rabbit_types:infos(), mfa(), mfa(),
+ mfa(), string()) -> rabbit_types:ok_pid_or_error()).
+-spec(start_link/8 ::
+ (gen_tcp:ip_address(), integer(), rabbit_types:infos(), mfa(), mfa(),
+ mfa(), integer(), string()) -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
AcceptCallback, Label) ->
start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
diff --git a/src/test_sup.erl b/src/test_sup.erl
index 84c4121c..5feb146f 100644
--- a/src/test_sup.erl
+++ b/src/test_sup.erl
@@ -21,6 +21,18 @@
-export([test_supervisor_delayed_restart/0,
init/1, start_child/0]).
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(test_supervisor_delayed_restart/0 :: () -> 'passed').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
test_supervisor_delayed_restart() ->
passed = with_sup(simple_one_for_one_terminate,
fun (SupPid) ->
diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl
index fb2fa267..a54bf996 100644
--- a/src/vm_memory_monitor.erl
+++ b/src/vm_memory_monitor.erl
@@ -57,15 +57,15 @@
-ifdef(use_specs).
--spec(start_link/1 :: (float()) -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
-spec(update/0 :: () -> 'ok').
-spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')).
-spec(get_vm_limit/0 :: () -> non_neg_integer()).
--spec(get_memory_limit/0 :: () -> non_neg_integer()).
-spec(get_check_interval/0 :: () -> non_neg_integer()).
-spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok').
-spec(get_vm_memory_high_watermark/0 :: () -> float()).
-spec(set_vm_memory_high_watermark/1 :: (float()) -> 'ok').
+-spec(get_memory_limit/0 :: () -> non_neg_integer()).
-endif.
diff --git a/src/worker_pool.erl b/src/worker_pool.erl
index e4f260cc..456ff39f 100644
--- a/src/worker_pool.erl
+++ b/src/worker_pool.erl
@@ -41,6 +41,7 @@
-spec(submit/1 :: (fun (() -> A) | {atom(), atom(), [any()]}) -> A).
-spec(submit_async/1 ::
(fun (() -> any()) | {atom(), atom(), [any()]}) -> 'ok').
+-spec(idle/1 :: (any()) -> 'ok').
-endif.
diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl
index 28c1adc6..d37c3a0f 100644
--- a/src/worker_pool_sup.erl
+++ b/src/worker_pool_sup.erl
@@ -26,8 +26,8 @@
-ifdef(use_specs).
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(start_link/1 :: (non_neg_integer()) -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()).
-endif.