summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRobert Newson <rnewson@apache.org>2019-02-05 17:04:54 +0000
committerRobert Newson <rnewson@apache.org>2019-02-05 18:40:17 +0000
commit250a509146bfb2f71fac07da61a7ff557b3cccab (patch)
treec8f5e857d7c0b56773605dd7370805d7c5685758
parentd0ec5408a8d6ab33d925271f21b8dc45dfb5cdc1 (diff)
downloadcouchdb-import-smoosh.tar.gz
Import smoosh from Cloudantimport-smoosh
Remove couch_compaction_daemon and related tests too.
-rwxr-xr-xdev/run3
-rw-r--r--rebar.config.script1
-rw-r--r--rel/overlay/etc/default.ini109
-rw-r--r--rel/reltool.config2
-rw-r--r--src/couch/src/couch_compaction_daemon.erl663
-rw-r--r--src/couch/src/couch_secondary_sup.erl3
-rw-r--r--src/couch/test/couchdb_compaction_daemon_tests.erl314
7 files changed, 11 insertions, 1084 deletions
diff --git a/dev/run b/dev/run
index e3181aae6..033878f3b 100755
--- a/dev/run
+++ b/dev/run
@@ -270,8 +270,7 @@ def setup_configs(ctx):
"backend_port": backend_port,
"fauxton_root": fauxton_root,
"uuid": "fake_uuid_for_dev",
- "_default": "",
- "compaction_daemon": "{}",
+ "_default": ""
}
write_config(ctx, node, env)
generate_haproxy_config(ctx)
diff --git a/rebar.config.script b/rebar.config.script
index 2a534e0dd..3f9b21749 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -92,6 +92,7 @@ DepDescs = [
{snappy, "snappy", {tag, "CouchDB-1.0.2"}},
{ioq, "ioq", {tag, "2.0.0"}},
{hqueue, "hqueue", {tag, "1.0.0"}},
+{smoosh, "smoosh", {tag, "1.0.0"}},
%% Non-Erlang deps
{docs, {url, "https://github.com/apache/couchdb-documentation"},
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 24428f3e5..2a920092f 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -114,15 +114,6 @@ max_db_number_for_dbs_info_req = 100
; prevent non-admins from accessing /_all_dbs
;admin_only_all_dbs = false
-[database_compaction]
-; larger buffer sizes can originate smaller files
-doc_buffer_size = 524288 ; value in bytes
-checkpoint_after = 5242880 ; checkpoint after every N bytes were written
-
-[view_compaction]
-; larger buffer sizes can originate smaller files
-keyvalue_buffer_size = 2097152 ; value in bytes
-
[couch_peruser]
; If enabled, couch_peruser ensures that a private per-user database
; exists for each document in _users. These databases are writable only
@@ -423,100 +414,6 @@ ssl_certificate_max_depth = 3
; or 403 response this setting is not needed.
;session_refresh_interval_sec = 550
-[compaction_daemon]
-; The delay, in seconds, between each check for which database and view indexes
-; need to be compacted.
-check_interval = 3600
-; If a database or view index file is smaller than this value (in bytes),
-; compaction will not happen. Very small files always have a very high
-; fragmentation, therefore it's not worth compacting them.
-min_file_size = 131072
-; With lots of databases and/or with lots of design docs in one or more
-; databases, the compaction_daemon can create significant CPU load when
-; checking whether databases and view indexes need compacting. The
-; snooze_period_ms setting ensures a smoother CPU load. Defaults to
-; 3000 milliseconds wait. Note that this option was formerly called
-; snooze_period, measured in seconds (it is currently still supported).
-; snooze_period_ms = 3000
-
-[compactions]
-; List of compaction rules for the compaction daemon.
-; The daemon compacts databases and their respective view groups when all the
-; condition parameters are satisfied. Configuration can be per database or
-; global, and it has the following format:
-;
-; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
-; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
-;
-; Possible parameters:
-;
-; * db_fragmentation - If the ratio (as an integer percentage), of the amount
-; of old data (and its supporting metadata) over the database
-; file size is equal to or greater than this value, this
-; database compaction condition is satisfied.
-; This value is computed as:
-;
-; (file_size - data_size) / file_size * 100
-;
-; The data_size and file_size values can be obtained when
-; querying a database's information URI (GET /dbname/).
-;
-; * view_fragmentation - If the ratio (as an integer percentage), of the amount
-; of old data (and its supporting metadata) over the view
-; index (view group) file size is equal to or greater than
-; this value, then this view index compaction condition is
-; satisfied. This value is computed as:
-;
-; (file_size - data_size) / file_size * 100
-;
-; The data_size and file_size values can be obtained when
-; querying a view group's information URI
-; (GET /dbname/_design/groupname/_info).
-;
-; * from _and_ to - The period for which a database (and its view groups) compaction
-; is allowed. The value for these parameters must obey the format:
-;
-; HH:MM - HH:MM (HH in [0..23], MM in [0..59])
-;
-; * strict_window - If a compaction is still running after the end of the allowed
-; period, it will be canceled if this parameter is set to 'true'.
-; It defaults to 'false' and it's meaningful only if the *period*
-; parameter is also specified.
-;
-; * parallel_view_compaction - If set to 'true', the database and its views are
-; compacted in parallel. This is only useful on
-; certain setups, like for example when the database
-; and view index directories point to different
-; disks. It defaults to 'false'.
-;
-; Before a compaction is triggered, an estimation of how much free disk space is
-; needed is computed. This estimation corresponds to 2 times the data size of
-; the database or view index. When there's not enough free disk space to compact
-; a particular database or view index, a warning message is logged.
-;
-; Examples:
-;
-; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
-; The `foo` database is compacted if its fragmentation is 70% or more.
-; Any view index of this database is compacted only if its fragmentation
-; is 60% or more.
-;
-; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
-; Similar to the preceding example but a compaction (database or view index)
-; is only triggered if the current time is between midnight and 4 AM.
-;
-; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
-; Similar to the preceding example - a compaction (database or view index)
-; is only triggered if the current time is between midnight and 4 AM. If at
-; 4 AM the database or one of its views is still compacting, the compaction
-; process will be canceled.
-;
-; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
-; Similar to the preceding example, but a database and its views can be
-; compacted in parallel.
-;
-_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
-
[log]
; Possible log levels:
; debug
@@ -579,3 +476,9 @@ writer = stderr
[stats]
; Stats collection interval in seconds. Default 10 seconds.
;interval = 10
+
+[smoosh.ratio_dbs]
+min_priority = 2.0
+
+[smoosh.ratio_views]
+min_priority = 2.0
diff --git a/rel/reltool.config b/rel/reltool.config
index ba1afc271..90348c76d 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -57,6 +57,7 @@
mochiweb,
rexi,
setup,
+ smoosh,
snappy
]},
{rel, "start_clean", "", [kernel, stdlib]},
@@ -112,6 +113,7 @@
{app, mochiweb, [{incl_cond, include}]},
{app, rexi, [{incl_cond, include}]},
{app, setup, [{incl_cond, include}]},
+ {app, smoosh, [{incl_cond, include}]},
{app, snappy, [{incl_cond, include}]}
]}.
diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl
deleted file mode 100644
index 2a46c3f26..000000000
--- a/src/couch/src/couch_compaction_daemon.erl
+++ /dev/null
@@ -1,663 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_compaction_daemon).
--behaviour(gen_server).
--vsn(1).
--behaviour(config_listener).
-
-% public API
--export([start_link/0, in_progress/0]).
-
-% gen_server callbacks
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("kernel/include/file.hrl").
-
--define(CONFIG_ETS, couch_compaction_daemon_config).
-
--define(RELISTEN_DELAY, 5000).
-
--record(state, {
- loop_pid,
- in_progress = []
-}).
-
--record(config, {
- db_frag = nil,
- view_frag = nil,
- period = nil,
- cancel = false,
- parallel_view_compact = false
-}).
-
--record(period, {
- from = nil,
- to = nil
-}).
-
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-in_progress() ->
- gen_server:call(?MODULE, in_progress).
-
-init(_) ->
- process_flag(trap_exit, true),
- ?CONFIG_ETS = ets:new(?CONFIG_ETS, [named_table, set, protected]),
- ok = config:listen_for_changes(?MODULE, nil),
- load_config(),
- Server = self(),
- Loop = spawn_link(fun() -> compact_loop(Server) end),
- {ok, #state{loop_pid = Loop}}.
-
-
-handle_cast({config_update, DbName, deleted}, State) ->
- true = ets:delete(?CONFIG_ETS, ?l2b(DbName)),
- {noreply, State};
-
-handle_cast({config_update, DbName, Config}, #state{loop_pid = Loop} = State) ->
- case parse_config(DbName, Config) of
- {ok, NewConfig} ->
- WasEmpty = (ets:info(?CONFIG_ETS, size) =:= 0),
- true = ets:insert(?CONFIG_ETS, {?l2b(DbName), NewConfig}),
- case WasEmpty of
- true ->
- Loop ! {self(), have_config};
- false ->
- ok
- end;
- error ->
- ok
- end,
- {noreply, State}.
-
-
-handle_call({start, DbName}, {Pid, _},
- #state{loop_pid = Pid, in_progress = InProgress} = State) ->
- {reply, ok, State#state{in_progress = [DbName|InProgress]}};
-handle_call({stop, DbName}, {Pid, _},
- #state{loop_pid = Pid, in_progress = InProgress} = State) ->
- {reply, ok, State#state{in_progress = InProgress -- [DbName]}};
-handle_call(in_progress, _From, #state{in_progress = InProgress} = State) ->
- {reply, InProgress, State};
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-
-handle_info({'EXIT', Pid, Reason}, #state{loop_pid = Pid} = State) ->
- {stop, {compaction_loop_died, Reason}, State};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State}.
-
-
-terminate(_Reason, _State) ->
- true = ets:delete(?CONFIG_ETS).
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-handle_config_change("compactions", DbName, Value, _, _) ->
- {ok, gen_server:cast(?MODULE, {config_update, DbName, Value})};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-get_snooze_period() ->
- % The snooze_period_ms option should be used, but snooze_period is supported
- % for legacy reasons.
- Default = config:get_integer("compaction_daemon", "snooze_period", 3),
- case config:get_integer("compaction_daemon", "snooze_period_ms", -1) of
- -1 -> Default * 1000;
- SnoozePeriod -> SnoozePeriod
- end.
-
-compact_loop(Parent) ->
- {ok, _} = couch_server:all_databases(
- fun(DbName, Acc) ->
- case ets:info(?CONFIG_ETS, size) =:= 0 of
- true ->
- {stop, Acc};
- false ->
- case get_db_config(DbName) of
- nil ->
- ok;
- {ok, Config} ->
- case check_period(Config) of
- true ->
- maybe_compact_db(Parent, DbName, Config),
- ok = timer:sleep(get_snooze_period());
- false ->
- ok
- end
- end,
- {ok, Acc}
- end
- end, ok),
- case ets:info(?CONFIG_ETS, size) =:= 0 of
- true ->
- receive {Parent, have_config} -> ok end;
- false ->
- PausePeriod = config:get_integer("compaction_daemon", "check_interval", 3600),
- ok = timer:sleep(PausePeriod * 1000)
- end,
- compact_loop(Parent).
-
-
-maybe_compact_db(Parent, DbName, Config) ->
- case (catch couch_db:open_int(DbName, [?ADMIN_CTX])) of
- {ok, Db} ->
- DDocNames = db_ddoc_names(Db),
- case can_db_compact(Config, Db) of
- true ->
- gen_server:call(Parent, {start, DbName}),
- {ok, _} = couch_db:start_compact(Db),
- TimeLeft = compact_time_left(Config),
- {ViewsCompactPid, ViewsMonRef} = case Config#config.parallel_view_compact of
- true ->
- Pid = spawn_link(fun() ->
- maybe_compact_views(DbName, DDocNames, Config)
- end),
- Ref = erlang:monitor(process, Pid),
- {Pid, Ref};
- false ->
- {nil, nil}
- end,
- case couch_db:wait_for_compaction(Db, TimeLeft) of
- ok ->
- couch_db:close(Db),
- case Config#config.parallel_view_compact of
- true -> ok;
- false -> maybe_compact_views(DbName, DDocNames, Config)
- end;
- {error, timeout} ->
- couch_log:info("Compaction daemon - canceling compaction "
- "for databaes `~s` because exceeded the allowed time.",
- [DbName]),
- ok = couch_db:cancel_compact(Db),
- couch_db:close(Db);
- {error, Reason} ->
- couch_db:close(Db),
- couch_log:error("Compaction daemon - an error occurred while"
- " compacting the database `~s`: ~p", [DbName, Reason])
- end,
- case ViewsMonRef of
- nil ->
- ok;
- _ ->
- receive
- {'DOWN', ViewsMonRef, process, _, _Reason} ->
- ok
- after TimeLeft + 1000 ->
- % Under normal circumstances, the view compaction process
- % should have finished already.
- erlang:demonitor(ViewsMonRef, [flush]),
- unlink(ViewsCompactPid),
- exit(ViewsCompactPid, kill)
- end
- end,
- gen_server:call(Parent, {stop, DbName});
- false ->
- couch_db:close(Db),
- maybe_compact_views(DbName, DDocNames, Config)
- end;
- _ ->
- ok
- end.
-
-
-maybe_compact_views(_DbName, [], _Config) ->
- ok;
-maybe_compact_views(DbName, [DDocName | Rest], Config) ->
- case check_period(Config) of
- true ->
- case maybe_compact_view(DbName, DDocName, Config) of
- ok ->
- maybe_compact_views(DbName, Rest, Config);
- timeout ->
- ok
- end,
- ok = timer:sleep(get_snooze_period());
- false ->
- ok
- end.
-
-
-db_ddoc_names(Db) ->
- case couch_db:get_design_docs(Db) of
- {ok, DDocs} ->
- [ddoc_name(DDoc) || DDoc <- DDocs];
- Error ->
- ErrMsg = "Could not get design docs for ~p error:~p",
- couch_log:error(ErrMsg, [couch_db:name(Db), Error]),
- []
- end.
-
-
-% Node local docs will be FDIs while cluster ones will be ejson
-ddoc_name(#full_doc_info{id = <<"_design/", Id/binary>>}) ->
- Id;
-ddoc_name({Props}) ->
- DocId = proplists:get_value(<<"_id">>, Props),
- <<"_design/", GroupName/binary>> = DocId,
- GroupName.
-
-maybe_compact_view(DbName, GroupId, Config) ->
- DDocId = <<"_design/", GroupId/binary>>,
- case (catch couch_mrview:get_info(DbName, DDocId)) of
- {ok, GroupInfo} ->
- case can_view_compact(Config, DbName, GroupId, GroupInfo) of
- true ->
- {ok, MonRef} = couch_mrview:compact(DbName, DDocId, [monitor]),
- TimeLeft = compact_time_left(Config),
- receive
- {'DOWN', MonRef, process, _, normal} ->
- ok;
- {'DOWN', MonRef, process, _, Reason} ->
- couch_log:error("Compaction daemon - an error ocurred"
- " while compacting the view group `~s` from database "
- "`~s`: ~p", [GroupId, DbName, Reason]),
- ok
- after TimeLeft ->
- couch_log:info("Compaction daemon - canceling the compaction"
- " for the view group `~s` of the database `~s` because it's"
- " exceeding the allowed period.", [GroupId, DbName]),
- erlang:demonitor(MonRef, [flush]),
- ok = couch_mrview:cancel_compaction(DbName, DDocId),
- timeout
- end;
- false ->
- ok
- end;
- Error ->
- couch_log:error("Error opening view group `~s` from database `~s`: ~p",
- [GroupId, DbName, Error]),
- ok
- end.
-
-
-compact_time_left(#config{cancel = false}) ->
- infinity;
-compact_time_left(#config{period = nil}) ->
- infinity;
-compact_time_left(#config{period = #period{to = {ToH, ToM} = To}}) ->
- {H, M, _} = time(),
- case To > {H, M} of
- true ->
- ((ToH - H) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000);
- false ->
- ((24 - H + ToH) * 60 * 60 * 1000) + (abs(ToM - M) * 60 * 1000)
- end.
-
-
-get_db_config(ShardName) ->
- case ets:lookup(?CONFIG_ETS, ShardName) of
- [] ->
- DbName = mem3:dbname(ShardName),
- case ets:lookup(?CONFIG_ETS, DbName) of
- [] ->
- case ets:lookup(?CONFIG_ETS, <<"_default">>) of
- [] ->
- nil;
- [{<<"_default">>, Config}] ->
- {ok, Config}
- end;
- [{DbName, Config}] ->
- {ok, Config}
- end;
- [{ShardName, Config}] ->
- {ok, Config}
- end.
-
-
-can_db_compact(#config{db_frag = Threshold} = Config, Db) ->
- case check_period(Config) of
- false ->
- false;
- true ->
- {ok, DbInfo} = couch_db:get_db_info(Db),
- {Frag, SpaceRequired} = frag(DbInfo),
- couch_log:debug("Fragmentation for database `~s` is ~p%, estimated"
- " space for compaction is ~p bytes.",
- [couch_db:name(Db), Frag, SpaceRequired]),
- case check_frag(Threshold, Frag) of
- false ->
- false;
- true ->
- Free = free_space(config:get("couchdb", "database_dir")),
- case Free >= SpaceRequired of
- true ->
- true;
- false ->
- couch_log:warning("Compaction daemon - skipping database `~s` "
- "compaction: the estimated necessary disk space is about ~p"
- " bytes but the currently available disk space is ~p bytes.",
- [couch_db:name(Db), SpaceRequired, Free]),
- false
- end
- end
- end.
-
-can_view_compact(Config, DbName, GroupId, GroupInfo) ->
- case check_period(Config) of
- false ->
- false;
- true ->
- case couch_util:get_value(updater_running, GroupInfo) of
- true ->
- false;
- false ->
- {Frag, SpaceRequired} = frag(GroupInfo),
- couch_log:debug("Fragmentation for view group `~s` (database `~s`)"
- " is ~p%, estimated space for compaction is ~p bytes.",
- [GroupId, DbName, Frag, SpaceRequired]),
- case check_frag(Config#config.view_frag, Frag) of
- false ->
- false;
- true ->
- Free = free_space(couch_index_util:root_dir()),
- case Free >= SpaceRequired of
- true ->
- true;
- false ->
- couch_log:warning("Compaction daemon - skipping view group"
- " `~s` compaction (database `~s`): the estimated"
- " necessary disk space is about ~p bytes"
- " but the currently available disk space is ~p bytes.",
- [GroupId, DbName, SpaceRequired, Free]),
- false
- end
- end
- end
- end.
-
-
-check_period(#config{period = nil}) ->
- true;
-check_period(#config{period = #period{from = From, to = To}}) ->
- {HH, MM, _} = erlang:time(),
- case From < To of
- true ->
- ({HH, MM} >= From) andalso ({HH, MM} < To);
- false ->
- ({HH, MM} >= From) orelse ({HH, MM} < To)
- end.
-
-
-check_frag(nil, _) ->
- true;
-check_frag(Threshold, Frag) ->
- Frag >= Threshold.
-
-
-frag(Props) ->
- {Sizes} = couch_util:get_value(sizes, Props),
- FileSize = couch_util:get_value(file, Sizes),
- MinFileSize = list_to_integer(
- config:get("compaction_daemon", "min_file_size", "131072")),
- case FileSize < MinFileSize of
- true ->
- {0, FileSize};
- false ->
- case couch_util:get_value(active, Sizes) of
- 0 ->
- {0, FileSize};
- DataSize when is_integer(DataSize), DataSize > 0 ->
- Frag = round(((FileSize - DataSize) / FileSize * 100)),
- {Frag, space_required(DataSize)};
- _ ->
- {100, FileSize}
- end
- end.
-
-% Rough, and pessimistic, estimation of necessary disk space to compact a
-% database or view index.
-space_required(DataSize) ->
- round(DataSize * 2.0).
-
-
-load_config() ->
- lists:foreach(
- fun({DbName, ConfigString}) ->
- case parse_config(DbName, ConfigString) of
- {ok, Config} ->
- true = ets:insert(?CONFIG_ETS, {?l2b(DbName), Config});
- error ->
- ok
- end
- end,
- config:get("compactions")).
-
-parse_config(DbName, ConfigString) ->
- case (catch do_parse_config(ConfigString)) of
- {ok, Conf} ->
- {ok, Conf};
- incomplete_period ->
- couch_log:error("Incomplete period ('to' or 'from' missing)"
- " in the compaction configuration for database `~s`",
- [DbName]),
- error;
- _ ->
- couch_log:error("Invalid compaction configuration for database "
- "`~s`: `~s`", [DbName, ConfigString]),
- error
- end.
-
-do_parse_config(ConfigString) ->
- {ok, ConfProps} = couch_util:parse_term(ConfigString),
- {ok, #config{period = Period} = Conf} = config_record(ConfProps, #config{}),
- case Period of
- nil ->
- {ok, Conf};
- #period{from = From, to = To} when From =/= nil, To =/= nil ->
- {ok, Conf};
- #period{} ->
- incomplete_period
- end.
-
-config_record([], Config) ->
- {ok, Config};
-
-config_record([{db_fragmentation, V} | Rest], Config) ->
- [Frag] = string:tokens(V, "%"),
- config_record(Rest, Config#config{db_frag = list_to_integer(Frag)});
-
-config_record([{view_fragmentation, V} | Rest], Config) ->
- [Frag] = string:tokens(V, "%"),
- config_record(Rest, Config#config{view_frag = list_to_integer(Frag)});
-
-config_record([{from, V} | Rest], #config{period = Period0} = Config) ->
- Time = parse_time(V),
- Period = case Period0 of
- nil ->
- #period{from = Time};
- #period{} ->
- Period0#period{from = Time}
- end,
- config_record(Rest, Config#config{period = Period});
-
-config_record([{to, V} | Rest], #config{period = Period0} = Config) ->
- Time = parse_time(V),
- Period = case Period0 of
- nil ->
- #period{to = Time};
- #period{} ->
- Period0#period{to = Time}
- end,
- config_record(Rest, Config#config{period = Period});
-
-config_record([{strict_window, true} | Rest], Config) ->
- config_record(Rest, Config#config{cancel = true});
-
-config_record([{strict_window, false} | Rest], Config) ->
- config_record(Rest, Config#config{cancel = false});
-
-config_record([{parallel_view_compaction, true} | Rest], Config) ->
- config_record(Rest, Config#config{parallel_view_compact = true});
-
-config_record([{parallel_view_compaction, false} | Rest], Config) ->
- config_record(Rest, Config#config{parallel_view_compact = false}).
-
-
-parse_time(String) ->
- [HH, MM] = string:tokens(String, ":"),
- {list_to_integer(HH), list_to_integer(MM)}.
-
-
-free_space(Path) ->
- DiskData = lists:sort(
- fun({PathA, _, _}, {PathB, _, _}) ->
- length(filename:split(PathA)) > length(filename:split(PathB))
- end,
- disksup:get_disk_data()),
- {ok, AbsPath} = abs_path(Path),
- free_space_rec(AbsPath, DiskData).
-
-free_space_rec(_Path, []) ->
- undefined;
-free_space_rec(Path0, [{MountPoint, Total, Usage} | Rest]) ->
- case abs_path(Path0) of
- {ok, Path} ->
- case MountPoint =:= string:substr(Path, 1, length(MountPoint)) of
- false ->
- free_space_rec(Path, Rest);
- true ->
- trunc(Total - (Total * (Usage / 100))) * 1024
- end;
- {error, Reason} ->
- couch_log:debug("Compaction daemon - unable to calculate free space"
- " for `~s`: `~s` for mount mount `~p`",
- [Path0, Reason, MountPoint]),
- free_space_rec(Path0, Rest)
- end.
-
-abs_path(Path0) ->
- case file:read_link_info(Path0) of
- {ok, Info} ->
- case Info#file_info.type of
- symlink ->
- {ok, Path} = file:read_link(Path0),
- abs_path(Path);
- _ ->
- abs_path2(Path0)
- end;
- {error, Reason} ->
- {error, Reason}
- end.
-
-abs_path2(Path0) ->
- Path = filename:absname(Path0),
- case lists:last(Path) of
- $/ ->
- {ok, Path};
- _ ->
- {ok, Path ++ "/"}
- end.
-
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-free_space_rec_test() ->
- ?assertEqual(undefined, free_space_rec("", [])),
- ?assertEqual(51200, free_space_rec("/tmp/", [{"/", 100, 50}])),
- ?assertEqual(51200, free_space_rec("/tmp/", [
- {"/floop", 200, 25},
- {"/run", 0, 0},
- {"/", 100, 50}
- ])),
- ?assertEqual(undefined, free_space_rec("/flopp/", [{"/", 300, 75}])),
- ?assertEqual(undefined, free_space_rec("/flopp/", [
- {"/floop", 200, 25},
- {"/run", 0, 0},
- {"/", 100, 50}
- ])),
- ok.
-
-abs_path2_test() ->
- ?assertEqual({ok, "/a/"}, abs_path2("/a")),
- ?assertEqual({ok, "/a/"}, abs_path2("/a/")),
-
- ?assertEqual({ok, "/a/b/"}, abs_path2("/a/b")),
- ?assertEqual({ok, "/a/b/"}, abs_path2("/a/b")),
- ok.
-
-get_snooze_period_test_() ->
- {
- foreach,
- fun() ->
- meck:new(config, [passthrough])
- end,
- fun(_) ->
- meck:unload()
- end,
- [
- {"should return default value without config attributes",
- fun should_default_without_config/0},
- {"should respect old config attribute",
- fun should_respect_old_config/0},
- {"should respect old config set to zero",
- fun should_respect_old_config_zero/0},
- {"should respect new config attribute",
- fun should_respect_new_config/0},
- {"should respect new config set to zero",
- fun should_respect_new_config_zero/0}
- ]
- }.
-
-should_default_without_config() ->
- ?assertEqual(3000, get_snooze_period()).
-
-should_respect_old_config() ->
- meck:expect(config, get_integer, fun
- ("compaction_daemon", "snooze_period", _) -> 1;
- (_, _, Default) -> Default
- end),
- ?assertEqual(1000, get_snooze_period()).
-
-should_respect_old_config_zero() ->
- meck:expect(config, get_integer, fun
- ("compaction_daemon", "snooze_period", _) -> 0;
- (_, _, Default) -> Default
- end),
- ?assertEqual(0, get_snooze_period()).
-
-should_respect_new_config() ->
- meck:expect(config, get_integer, fun
- ("compaction_daemon", "snooze_period", _) -> 1;
- ("compaction_daemon", "snooze_period_ms", _) -> 300;
- (_, _, Default) -> Default
- end),
- ?assertEqual(300, get_snooze_period()).
-
-should_respect_new_config_zero() ->
- meck:expect(config, get_integer, fun
- ("compaction_daemon", "snooze_period", _) -> 1;
- ("compaction_daemon", "snooze_period_ms", _) -> 0;
- (_, _, Default) -> Default
- end),
- ?assertEqual(0, get_snooze_period()).
-
--endif.
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 9b424dc6a..0f46ec85f 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -32,8 +32,7 @@ init([]) ->
{vhosts, {couch_httpd_vhost, start_link, []}},
{httpd, {couch_httpd, start_link, []}},
{uuids, {couch_uuids, start, []}},
- {auth_cache, {couch_auth_cache, start_link, []}},
- {compaction_daemon, {couch_compaction_daemon, start_link, []}}
+ {auth_cache, {couch_auth_cache, start_link, []}}
],
MaybeHttps = case https_enabled() of
diff --git a/src/couch/test/couchdb_compaction_daemon_tests.erl b/src/couch/test/couchdb_compaction_daemon_tests.erl
deleted file mode 100644
index 0ef2a4064..000000000
--- a/src/couch/test/couchdb_compaction_daemon_tests.erl
+++ /dev/null
@@ -1,314 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_compaction_daemon_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 120000).
--define(TIMEOUT_S, ?TIMEOUT div 1000).
--define(MODS_TO_MOCK,
- [couch_db_updater, couch_mrview_compactor, couch_compaction_daemon]).
-
-
-start() ->
- Ctx = test_util:start_couch(),
- ok = config:set("compaction_daemon", "check_interval", "3", false),
- ok = config:set("compaction_daemon", "snooze_period_ms", "0", false),
- ok = config:set("compaction_daemon", "min_file_size", "100000", false),
- ok = config:delete("compactions", "_default", false),
- ok = meck:new(?MODS_TO_MOCK, [passthrough]),
- Ctx.
-
-stop(Ctx) ->
- test_util:stop_couch(Ctx),
- meck:unload(?MODS_TO_MOCK).
-
-setup() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- create_design_doc(Db),
- populate(DbName, 70, 70, 200 * 1024),
- ok = couch_db:close(Db),
- meck:reset(?MODS_TO_MOCK),
- DbName.
-
-teardown(DbName) ->
- Configs = config:get("compactions"),
- lists:foreach(
- fun({Key, _}) ->
- ok = config:delete("compactions", Key, false)
- end,
- Configs),
- couch_server:delete(DbName, [?ADMIN_CTX]),
- exit(whereis(couch_index_server), shutdown).
-
-
-compaction_daemon_test_() ->
- {
- "Compaction daemon tests",
- {
- setup,
- fun start/0, fun stop/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_compact_by_default_rule/1,
- fun should_compact_by_dbname_rule/1
- ]
- }
- }
- }.
-
-
-should_compact_by_default_rule(DbName) ->
- {timeout, ?TIMEOUT_S, ?_test(begin
- CompactionMonitor = spawn_compaction_monitor(DbName),
-
- {_, DbFileSize} = get_db_frag(DbName),
- {_, ViewFileSize} = get_view_frag(DbName),
-
- with_config_change(DbName, fun() ->
- ok = config:set("compactions", "_default",
- "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
- false)
- end),
-
- wait_for_compaction(CompactionMonitor),
-
- with_config_change(DbName, fun() ->
- ok = config:delete("compactions", "_default", false)
- end),
-
- {DbFrag2, DbFileSize2} = get_db_frag(DbName),
- {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
-
- ?assert(DbFrag2 < 70),
- ?assert(ViewFrag2 < 70),
-
- ?assert(DbFileSize > DbFileSize2),
- ?assert(ViewFileSize > ViewFileSize2),
-
- ?assert(is_idle(DbName))
- end)}.
-
-should_compact_by_dbname_rule(DbName) ->
- {timeout, ?TIMEOUT_S, ?_test(begin
- CompactionMonitor = spawn_compaction_monitor(DbName),
-
- {_, DbFileSize} = get_db_frag(DbName),
- {_, ViewFileSize} = get_view_frag(DbName),
-
- with_config_change(DbName, fun() ->
- ok = config:set("compactions", ?b2l(DbName),
- "[{db_fragmentation, \"70%\"}, {view_fragmentation, \"70%\"}]",
- false)
- end),
-
- wait_for_compaction(CompactionMonitor),
-
- with_config_change(DbName, fun() ->
- ok = config:delete("compactions", ?b2l(DbName), false)
- end),
-
- {DbFrag2, DbFileSize2} = get_db_frag(DbName),
- {ViewFrag2, ViewFileSize2} = get_view_frag(DbName),
-
- ?assert(DbFrag2 < 70),
- ?assert(ViewFrag2 < 70),
-
- ?assert(DbFileSize > DbFileSize2),
- ?assert(ViewFileSize > ViewFileSize2),
-
- ?assert(is_idle(DbName))
- end)}.
-
-
-create_design_doc(Db) ->
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>}
- ]}}
- ]}}
- ]}),
- {ok, _} = couch_db:update_docs(Db, [DDoc]),
- {ok, _} = couch_db:ensure_full_commit(Db),
- ok.
-
-populate(DbName, DbFrag, ViewFrag, MinFileSize) ->
- {CurDbFrag, DbFileSize} = get_db_frag(DbName),
- {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
- populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
- lists:min([DbFileSize, ViewFileSize])).
-
-populate(_Db, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag, FileSize)
- when CurDbFrag >= DbFrag, CurViewFrag >= ViewFrag, FileSize >= MinFileSize ->
- ok;
-populate(DbName, DbFrag, ViewFrag, MinFileSize, _, _, _) ->
- update(DbName),
- {CurDbFrag, DbFileSize} = get_db_frag(DbName),
- {CurViewFrag, ViewFileSize} = get_view_frag(DbName),
- populate(DbName, DbFrag, ViewFrag, MinFileSize, CurDbFrag, CurViewFrag,
- lists:min([DbFileSize, ViewFileSize])).
-
-update(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- lists:foreach(fun(_) ->
- Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
- {ok, _} = couch_db:update_docs(Db, [Doc]),
- query_view(couch_db:name(Db))
- end, lists:seq(1, 200)),
- couch_db:close(Db).
-
-db_url(DbName) ->
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName).
-
-query_view(DbName) ->
- {ok, Code, _Headers, _Body} = test_request:get(
- db_url(DbName) ++ "/_design/foo/_view/foo"),
- ?assertEqual(200, Code).
-
-get_db_frag(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_db:get_db_info(Db),
- couch_db:close(Db),
- FileSize = get_size(file, Info),
- DataSize = get_size(active, Info),
- {round((FileSize - DataSize) / FileSize * 100), FileSize}.
-
-get_view_frag(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- {ok, Info} = couch_mrview:get_info(Db, <<"_design/foo">>),
- couch_db:close(Db),
- FileSize = get_size(file, Info),
- DataSize = get_size(active, Info),
- {round((FileSize - DataSize) / FileSize * 100), FileSize}.
-
-get_size(Kind, Info) ->
- couch_util:get_nested_json_value({Info}, [sizes, Kind]).
-
-spawn_compaction_monitor(DbName) ->
- TestPid = self(),
- {Pid, Ref} = spawn_monitor(fun() ->
- DaemonPid = whereis(couch_compaction_daemon),
- DbPid = couch_util:with_db(DbName, fun(Db) ->
- couch_db:get_pid(Db)
- end),
- {ok, ViewPid} = couch_index_server:get_index(couch_mrview_index,
- DbName, <<"_design/foo">>),
- {ok, CompactorPid} = couch_index:get_compactor_pid(ViewPid),
- TestPid ! {self(), started},
- receive
- {TestPid, go} -> ok
- after ?TIMEOUT ->
- erlang:error(timeout)
- end,
- meck:wait(
- 1,
- couch_compaction_daemon,
- handle_cast,
- [{config_update, '_', '_'}, '_'],
- DaemonPid,
- ?TIMEOUT
- ),
- meck:wait(
- 1,
- couch_db_updater,
- handle_cast,
- [{compact_done, '_', '_'}, '_'],
- DbPid,
- ?TIMEOUT
- ),
- meck:reset(couch_mrview_compactor),
- meck:wait(
- 1,
- couch_mrview_compactor,
- compact,
- ['_', '_', '_'],
- ?TIMEOUT
- ),
- {ok, CPid} = couch_index_compactor:get_compacting_pid(CompactorPid),
- CRef = erlang:monitor(process, CPid),
- meck:wait(
- 1,
- couch_mrview_compactor,
- swap_compacted,
- ['_', '_'],
- ViewPid,
- ?TIMEOUT
- ),
- receive
- {'DOWN', CRef, process, _, _} -> ok
- after ?TIMEOUT ->
- erlang:error(timeout)
- end
- end),
- receive
- {Pid, started} -> ok;
- {'DOWN', Ref, _, _, Reason} -> erlang:error({monitor_failure, Reason})
- after ?TIMEOUT ->
- erlang:error({assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Compaction starting timeout"}
- ]})
- end,
- {Pid, Ref}.
-
-wait_for_compaction({Pid, Ref}) ->
- Pid ! {self(), go},
- receive
- {'DOWN', Ref, _, _, normal} -> ok;
- {'DOWN', Ref, _, _, Other} -> erlang:error(Other)
- after ?TIMEOUT ->
- erlang:error({assertion_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {reason, "Compaction finishing timeout"}
- ]})
- end.
-
-is_idle(DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- Monitors = couch_db:monitored_by(Db),
- ok = couch_db:close(Db),
- Others = [M || M <- Monitors, M /= self()],
- if Others == [] -> ok; true ->
- lists:foreach(fun(Other) ->
- Args = [Other, process_info(Other)],
- couch_log:error("XKCD: MONITORED BY ~p :: ~p", Args)
- end, Others)
- end,
- not lists:any(fun(M) -> M /= self() end, Monitors).
-
-with_config_change(_DbName, Fun) ->
- Current = ets:info(couch_compaction_daemon_config, size),
- Fun(),
- test_util:wait(fun() ->
- case ets:info(couch_compaction_daemon_config, size) == Current of
- false -> ok;
- true -> wait
- end
- end).