summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeng Hui Jiang <jiangph@cn.ibm.com>2020-04-08 01:40:11 +0800
committerGitHub <noreply@github.com>2020-04-08 01:40:11 +0800
commit6c1d7a993571d9e0e2304b12bbeaf8abb146cb0e (patch)
treea6d19f45bdc884a0b8a79a82a975f9799c8ef7eb
parent1d6799f5239af5e36d089ae605f943a13bb4ed99 (diff)
parentec12e1f54b5e0dab477c8489b72af051cc490070 (diff)
downloadcouchdb-6c1d7a993571d9e0e2304b12bbeaf8abb146cb0e.tar.gz
Merge pull request #2666 from apache/db-softdeletion
soft-deletion for database
-rw-r--r--src/chttpd/src/chttpd_httpd_handlers.erl10
-rw-r--r--src/chttpd/src/chttpd_misc.erl117
-rw-r--r--src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl234
-rw-r--r--src/fabric/include/fabric2.hrl1
-rw-r--r--src/fabric/src/fabric2_db.erl147
-rw-r--r--src/fabric/src/fabric2_fdb.erl132
-rw-r--r--src/fabric/src/fabric2_util.erl16
-rw-r--r--src/fabric/test/fabric2_db_crud_tests.erl233
8 files changed, 847 insertions, 43 deletions
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index be6c0a13e..3fd56c354 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -28,6 +28,7 @@ url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_deleted_dbs">>) -> fun chttpd_misc:handle_deleted_dbs_req/1;
url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
@@ -67,6 +68,15 @@ handler_info('GET', [<<"_active_tasks">>], _) ->
handler_info('GET', [<<"_all_dbs">>], _) ->
{'all_dbs.read', #{}};
+handler_info('GET', [<<"_deleted_dbs">>], _) ->
+ {'account-deleted-dbs.read', #{}};
+
+handler_info('POST', [<<"_deleted_dbs">>], _) ->
+ {'account-deleted-dbs.undelete', #{}};
+
+handler_info('DELETE', [<<"_deleted_dbs">>, Db], _) ->
+ {'account-deleted-dbs.delete', #{'db.name' => Db}};
+
handler_info('POST', [<<"_dbs_info">>], _) ->
{'dbs_info.read', #{}};
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index ca1e58ad2..843c3fe7e 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -15,6 +15,7 @@
-export([
handle_all_dbs_req/1,
handle_dbs_info_req/1,
+ handle_deleted_dbs_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
handle_replicate_req/1,
@@ -164,35 +165,7 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
-
- #mrargs{
- start_key = StartKey,
- end_key = EndKey,
- direction = Dir,
- limit = Limit,
- skip = Skip
- } = couch_mrview_http:parse_params(Req, undefined),
-
- Options = [
- {start_key, StartKey},
- {end_key, EndKey},
- {dir, Dir},
- {limit, Limit},
- {skip, Skip}
- ],
-
- % TODO: Figure out if we can't calculate a valid
- % ETag for this request. \xFFmetadataVersion won't
- % work as we don't bump versions on size changes
-
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, []),
- Callback = fun dbs_info_callback/2,
- Acc = #vacc{req = Req, resp = Resp},
- {ok, Resp} = fabric2_db:list_dbs_info(Callback, Acc, Options),
- case is_record(Resp, vacc) of
- true -> {ok, Resp#vacc.resp};
- _ -> {ok, Resp}
- end;
+ send_db_infos(Req, list_dbs_info);
handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
chttpd:validate_ctype(Req, "application/json"),
Props = chttpd:json_body_obj(Req),
@@ -226,6 +199,92 @@ handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
handle_dbs_info_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD,POST").
+handle_deleted_dbs_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+ ok = chttpd:verify_is_server_admin(Req),
+ send_db_infos(Req, list_deleted_dbs_info);
+handle_deleted_dbs_req(#httpd{method='POST', user_ctx=Ctx, path_parts=[_]}=Req) ->
+ couch_httpd:verify_is_server_admin(Req),
+ chttpd:validate_ctype(Req, "application/json"),
+ GetJSON = fun(Key, Props, Default) ->
+ case couch_util:get_value(Key, Props) of
+ undefined when Default == error ->
+ Fmt = "POST body must include `~s` parameter.",
+ Msg = io_lib:format(Fmt, [Key]),
+ throw({bad_request, iolist_to_binary(Msg)});
+ undefined ->
+ Default;
+ Value ->
+ Value
+ end
+ end,
+ {BodyProps} = chttpd:json_body_obj(Req),
+ {UndeleteProps} = GetJSON(<<"undelete">>, BodyProps, error),
+ DbName = GetJSON(<<"source">>, UndeleteProps, error),
+ TimeStamp = GetJSON(<<"timestamp">>, UndeleteProps, error),
+ TgtDbName = GetJSON(<<"target">>, UndeleteProps, DbName),
+ case fabric2_db:undelete(DbName, TgtDbName, TimeStamp, [{user_ctx, Ctx}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ {error, file_exists} ->
+ chttpd:send_error(Req, file_exists);
+ {error, not_found} ->
+ chttpd:send_error(Req, not_found);
+ Error ->
+ throw(Error)
+ end;
+handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 1 ->
+ send_method_not_allowed(Req, "GET,HEAD,POST");
+handle_deleted_dbs_req(#httpd{method='DELETE', user_ctx=Ctx, path_parts=[_, DbName]}=Req) ->
+ couch_httpd:verify_is_server_admin(Req),
+ TS = case ?JSON_DECODE(couch_httpd:qs_value(Req, "timestamp", "null")) of
+ null ->
+ throw({bad_request, "`timestamp` parameter is not provided."});
+ TS0 ->
+ TS0
+ end,
+ case fabric2_db:delete(DbName, [{user_ctx, Ctx}, {deleted_at, TS}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ {error, not_found} ->
+ chttpd:send_error(Req, not_found);
+ Error ->
+ throw(Error)
+ end;
+handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 2 ->
+ send_method_not_allowed(Req, "HEAD,DELETE");
+handle_deleted_dbs_req(Req) ->
+ chttpd:send_error(Req, not_found).
+
+send_db_infos(Req, ListFunctionName) ->
+ #mrargs{
+ start_key = StartKey,
+ end_key = EndKey,
+ direction = Dir,
+ limit = Limit,
+ skip = Skip
+ } = couch_mrview_http:parse_params(Req, undefined),
+
+ Options = [
+ {start_key, StartKey},
+ {end_key, EndKey},
+ {dir, Dir},
+ {limit, Limit},
+ {skip, Skip}
+ ],
+
+ % TODO: Figure out if we can't calculate a valid
+ % ETag for this request. \xFFmetadataVersion won't
+ % work as we don't bump versions on size changes
+
+ {ok, Resp1} = chttpd:start_delayed_json_response(Req, 200, []),
+ Callback = fun dbs_info_callback/2,
+ Acc = #vacc{req = Req, resp = Resp1},
+ {ok, Resp2} = fabric2_db:ListFunctionName(Callback, Acc, Options),
+ case is_record(Resp2, vacc) of
+ true -> {ok, Resp2#vacc.resp};
+ _ -> {ok, Resp2}
+ end.
+
dbs_info_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
{ok, Acc#vacc{resp = Resp1}};
diff --git a/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl b/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl
new file mode 100644
index 000000000..d6375c048
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_deleted_dbs_test.erl
@@ -0,0 +1,234 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_deleted_dbs_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ lists:concat(["http://", Addr, ":", Port, "/"]).
+
+
+teardown(_Url) ->
+ ok = config:delete("couchdb", "enable_database_recovery", false),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+
+create_db(Url) ->
+ {ok, Status, _, _} = http(put, Url, ""),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+
+delete_db(Url) ->
+ {ok, 200, _, _} = http(delete, Url).
+
+
+deleted_dbs_test_() ->
+ {
+ "chttpd deleted dbs tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0,
+ fun teardown/1,
+ [
+ fun should_return_error_for_unsupported_method/1,
+ fun should_list_deleted_dbs/1,
+ fun should_list_deleted_dbs_info/1,
+ fun should_undelete_db/1,
+ fun should_remove_deleted_db/1,
+ fun should_undelete_db_to_target_db/1,
+ fun should_not_undelete_db_to_existing_db/1
+ ]
+ }
+ }
+ }.
+
+
+should_return_error_for_unsupported_method(Url) ->
+ ?_test(begin
+ {ok, Code, _, Body} = http(delete, mk_url(Url)),
+
+ ?assertEqual(405, Code),
+ ?assertEqual(<<"method_not_allowed">>, get_json(<<"error">>, Body))
+ end).
+
+
+should_list_deleted_dbs(Url) ->
+ ?_test(begin
+ DbName1 = create_and_delete_db(Url),
+ DbName2 = create_and_delete_db(Url),
+ {ok, Code, _, Body} = http(get, mk_url(Url)),
+ DeletedDbs = get_db_names(Body),
+
+ ?assertEqual(200, Code),
+ ?assertEqual(true, lists:member(DbName1, DeletedDbs)),
+ ?assertEqual(true, lists:member(DbName2, DeletedDbs))
+ end).
+
+
+should_list_deleted_dbs_info(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body),
+
+ ?assertEqual(DbName, couch_util:get_value(<<"db_name">>, Props))
+ end).
+
+
+should_undelete_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, ResultBody} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(ResultBody),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp}
+ ]}}
+ ]},
+
+ {ok, Code1, _, _} = http(get, Url ++ DbName),
+ ?assertEqual(404, Code1),
+
+ {ok, Code2, _, _} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(200, Code2),
+
+ {ok, Code3, _, _} = http(get, Url ++ DbName),
+ ?assertEqual(200, Code3)
+ end).
+
+
+should_remove_deleted_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body1} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body1),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ {ok, Code, _, _} = http(delete, mk_url(Url, DbName, TimeStamp)),
+ ?assertEqual(200, Code),
+
+ {ok, _, _, Body2} = http(get, mk_url(Url, DbName)),
+ ?assertEqual([], jiffy:decode(Body2))
+ end).
+
+
+should_undelete_db_to_target_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, Body} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(Body),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ NewDbName = ?tempdb(),
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp},
+ {target, NewDbName}
+ ]}}
+ ]},
+
+ {ok, Code1, _, _} = http(get, Url ++ NewDbName),
+ ?assertEqual(404, Code1),
+
+ {ok, Code2, _, _} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(200, Code2),
+
+ {ok, Code3, _, _} = http(get, Url ++ NewDbName),
+ ?assertEqual(200, Code3)
+ end).
+
+
+should_not_undelete_db_to_existing_db(Url) ->
+ ?_test(begin
+ DbName = create_and_delete_db(Url),
+ {ok, _, _, ResultBody} = http(get, mk_url(Url, DbName)),
+ [{Props}] = jiffy:decode(ResultBody),
+ TimeStamp = couch_util:get_value(<<"timestamp">>, Props),
+
+ NewDbName = ?tempdb(),
+ create_db(Url ++ NewDbName),
+ ErlJSON = {[
+ {undelete, {[
+ {source, DbName},
+ {timestamp, TimeStamp},
+ {target, NewDbName}
+ ]}}
+ ]},
+ {ok, Code2, _, ResultBody2} = http(post, mk_url(Url), ErlJSON),
+ ?assertEqual(412, Code2),
+ ?assertEqual(<<"file_exists">>, get_json(<<"error">>, ResultBody2))
+ end).
+
+
+create_and_delete_db(BaseUrl) ->
+ DbName = ?tempdb(),
+ DbUrl = BaseUrl ++ DbName,
+ create_db(DbUrl),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ delete_db(DbUrl),
+ DbName.
+
+
+http(Verb, Url) ->
+ Headers = [?CONTENT_JSON, ?AUTH],
+ test_request:Verb(Url, Headers).
+
+
+http(Verb, Url, Body) ->
+ Headers = [?CONTENT_JSON, ?AUTH],
+ test_request:Verb(Url, Headers, jiffy:encode(Body)).
+
+
+mk_url(Url) ->
+ Url ++ "/_deleted_dbs".
+
+
+mk_url(Url, DbName) ->
+ Url ++ "/_deleted_dbs?key=\"" ++ ?b2l(DbName) ++ "\"".
+
+
+mk_url(Url, DbName, TimeStamp) ->
+ Url ++ "/_deleted_dbs/" ++ ?b2l(DbName) ++ "?timestamp=\"" ++
+ ?b2l(TimeStamp) ++ "\"".
+
+
+get_json(Key, Body) ->
+ {Props} = jiffy:decode(Body),
+ couch_util:get_value(Key, Props).
+
+
+get_db_names(Body) ->
+ RevDbNames = lists:foldl(fun({DbInfo}, Acc) ->
+ DbName = couch_util:get_value(<<"db_name">>, DbInfo),
+ [DbName | Acc]
+ end, [], jiffy:decode(Body)),
+ lists:reverse(RevDbNames).
diff --git a/src/fabric/include/fabric2.hrl b/src/fabric/include/fabric2.hrl
index 0c0757567..e12762260 100644
--- a/src/fabric/include/fabric2.hrl
+++ b/src/fabric/include/fabric2.hrl
@@ -22,6 +22,7 @@
-define(CLUSTER_CONFIG, 0).
-define(ALL_DBS, 1).
-define(DB_HCA, 2).
+-define(DELETED_DBS, 3).
-define(DBS, 15).
-define(TX_IDS, 255).
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index fb6ae5176..3d6d9245e 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -17,6 +17,7 @@
create/2,
open/2,
delete/2,
+ undelete/4,
list_dbs/0,
list_dbs/1,
@@ -26,6 +27,10 @@
list_dbs_info/1,
list_dbs_info/3,
+ list_deleted_dbs_info/0,
+ list_deleted_dbs_info/1,
+ list_deleted_dbs_info/3,
+
check_is_admin/1,
check_is_member/1,
@@ -202,12 +207,30 @@ delete(DbName, Options) ->
% Delete doesn't check user_ctx, that's done at the HTTP API level
% here we just care to get the `database_does_not_exist` error thrown
Options1 = lists:keystore(user_ctx, 1, Options, ?ADMIN_CTX),
- {ok, Db} = open(DbName, Options1),
- Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
- fabric2_fdb:delete(TxDb)
- end),
- if Resp /= ok -> Resp; true ->
- fabric2_server:remove(DbName)
+ case lists:keyfind(deleted_at, 1, Options1) of
+ {deleted_at, TimeStamp} ->
+ fabric2_fdb:transactional(DbName, Options1, fun(TxDb) ->
+ fabric2_fdb:remove_deleted_db(TxDb, TimeStamp)
+ end);
+ false ->
+ {ok, Db} = open(DbName, Options1),
+ Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ fabric2_fdb:delete(TxDb)
+ end),
+ if Resp /= ok -> Resp; true ->
+ fabric2_server:remove(DbName)
+ end
+ end.
+
+
+undelete(DbName, TgtDbName, TimeStamp, Options) ->
+ case validate_dbname(TgtDbName) of
+ ok ->
+ fabric2_fdb:transactional(DbName, Options, fun(TxDb) ->
+ fabric2_fdb:undelete(TxDb, TgtDbName, TimeStamp)
+ end);
+ Error ->
+ Error
end.
@@ -283,6 +306,87 @@ list_dbs_info(UserFun, UserAcc0, Options) ->
end).
+list_deleted_dbs_info() ->
+ list_deleted_dbs_info([]).
+
+
+list_deleted_dbs_info(Options) ->
+ Callback = fun(Value, Acc) ->
+ NewAcc = case Value of
+ {meta, _} -> Acc;
+ {row, DbInfo} -> [DbInfo | Acc];
+ complete -> Acc
+ end,
+ {ok, NewAcc}
+ end,
+ {ok, DbInfos} = list_deleted_dbs_info(Callback, [], Options),
+ {ok, lists:reverse(DbInfos)}.
+
+
+list_deleted_dbs_info(UserFun, UserAcc0, Options0) ->
+ Dir = fabric2_util:get_value(dir, Options0, fwd),
+ StartKey0 = fabric2_util:get_value(start_key, Options0),
+ EndKey0 = fabric2_util:get_value(end_key, Options0),
+
+ {FirstBinary, LastBinary} = case Dir of
+ fwd -> {<<>>, <<255>>};
+ rev -> {<<255>>, <<>>}
+ end,
+
+ StartKey1 = case StartKey0 of
+ undefined ->
+ {FirstBinary};
+ DbName0 when is_binary(DbName0) ->
+ {DbName0, FirstBinary};
+ [DbName0, TimeStamp0] when is_binary(DbName0), is_binary(TimeStamp0) ->
+ {DbName0, TimeStamp0};
+ BadStartKey ->
+ erlang:error({invalid_start_key, BadStartKey})
+ end,
+ EndKey1 = case EndKey0 of
+ undefined ->
+ {LastBinary};
+ DbName1 when is_binary(DbName1) ->
+ {DbName1, LastBinary};
+ [DbName1, TimeStamp1] when is_binary(DbName1), is_binary(TimeStamp1) ->
+ {DbName1, TimeStamp1};
+ BadEndKey ->
+ erlang:error({invalid_end_key, BadEndKey})
+ end,
+
+ Options1 = Options0 -- [{start_key, StartKey0}, {end_key, EndKey0}],
+ Options2 = [
+ {start_key, StartKey1},
+ {end_key, EndKey1},
+ {wrap_keys, false}
+ ] ++ Options1,
+
+ FoldFun = fun(DbName, TimeStamp, InfoFuture, {FutureQ, Count, Acc}) ->
+ NewFutureQ = queue:in({DbName, TimeStamp, InfoFuture}, FutureQ),
+ drain_deleted_info_futures(NewFutureQ, Count + 1, UserFun, Acc)
+ end,
+ fabric2_fdb:transactional(fun(Tx) ->
+ try
+ UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
+ InitAcc = {queue:new(), 0, UserAcc1},
+ {FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_deleted_dbs_info(
+ Tx,
+ FoldFun,
+ InitAcc,
+ Options2
+ ),
+ UserAcc3 = drain_all_deleted_info_futures(
+ FinalFutureQ,
+ UserFun,
+ UserAcc2
+ ),
+ {ok, maybe_stop(UserFun(complete, UserAcc3))}
+ catch throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
+ end
+ end).
+
+
is_admin(Db, {SecProps}) when is_list(SecProps) ->
case fabric2_db_plugin:check_is_admin(Db) of
true ->
@@ -1064,6 +1168,37 @@ drain_all_info_futures(FutureQ, UserFun, Acc) ->
end.
+drain_deleted_info_futures(FutureQ, Count, _UserFun, Acc) when Count < 100 ->
+ {FutureQ, Count, Acc};
+
+drain_deleted_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
+ {{value, {DbName, TimeStamp, Future}}, RestQ} = queue:out(FutureQ),
+ BaseProps = fabric2_fdb:get_info_wait(Future),
+ DeletedProps = BaseProps ++ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
+ DbInfo = make_db_info(DbName, DeletedProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ {RestQ, Count - 1, NewAcc}.
+
+
+drain_all_deleted_info_futures(FutureQ, UserFun, Acc) ->
+ case queue:out(FutureQ) of
+ {{value, {DbName, TimeStamp, Future}}, RestQ} ->
+ BaseProps = fabric2_fdb:get_info_wait(Future),
+ DeletedProps = BaseProps ++ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
+ DbInfo = make_db_info(DbName, DeletedProps),
+ NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
+ drain_all_deleted_info_futures(RestQ, UserFun, NewAcc);
+ {empty, _} ->
+ Acc
+ end.
+
+
new_revid(Db, Doc) ->
#doc{
id = DocId,
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 2295a5648..430693329 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -22,12 +22,15 @@
open/2,
ensure_current/1,
delete/1,
+ undelete/3,
+ remove_deleted_db/2,
exists/1,
get_dir/1,
list_dbs/4,
list_dbs_info/4,
+ list_deleted_dbs_info/4,
get_info/1,
get_info_future/2,
@@ -340,18 +343,70 @@ reopen(#{} = OldDb) ->
delete(#{} = Db) ->
+ DoRecovery = fabric2_util:do_recovery(),
+ case DoRecovery of
+ true -> soft_delete_db(Db);
+ false -> hard_delete_db(Db)
+ end.
+
+
+undelete(#{} = Db0, TgtDbName, TimeStamp) ->
#{
name := DbName,
tx := Tx,
- layer_prefix := LayerPrefix,
- db_prefix := DbPrefix
- } = ensure_current(Db),
+ layer_prefix := LayerPrefix
+ } = ensure_current(Db0, false),
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, TgtDbName}, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+ Bin when is_binary(Bin) ->
+ file_exists;
+ not_found ->
+ DeletedDbTupleKey = {
+ ?DELETED_DBS,
+ DbName,
+ TimeStamp
+ },
+ DeleteDbKey = erlfdb_tuple:pack(DeletedDbTupleKey, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeleteDbKey)) of
+ not_found ->
+ not_found;
+ DbPrefix ->
+ erlfdb:set(Tx, DbKey, DbPrefix),
+ erlfdb:clear(Tx, DeleteDbKey),
+ bump_db_version(#{
+ tx => Tx,
+ db_prefix => DbPrefix
+ }),
+ ok
+ end
+ end.
- DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
- erlfdb:clear(Tx, DbKey),
- erlfdb:clear_range_startswith(Tx, DbPrefix),
- bump_metadata_version(Tx),
- ok.
+
+remove_deleted_db(#{} = Db0, TimeStamp) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix
+ } = ensure_current(Db0, false),
+
+ DeletedDbTupleKey = {
+ ?DELETED_DBS,
+ DbName,
+ TimeStamp
+ },
+ DeletedDbKey = erlfdb_tuple:pack(DeletedDbTupleKey, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeletedDbKey)) of
+ not_found ->
+ not_found;
+ DbPrefix ->
+ erlfdb:clear(Tx, DeletedDbKey),
+ erlfdb:clear_range_startswith(Tx, DbPrefix),
+ bump_db_version(#{
+ tx => Tx,
+ db_prefix => DbPrefix
+ }),
+ ok
+ end.
exists(#{name := DbName} = Db) when is_binary(DbName) ->
@@ -401,6 +456,20 @@ list_dbs_info(Tx, Callback, AccIn, Options0) ->
end, AccIn, Options).
+list_deleted_dbs_info(Tx, Callback, AccIn, Options0) ->
+ Options = case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
+ LayerPrefix = get_dir(Tx),
+ Prefix = erlfdb_tuple:pack({?DELETED_DBS}, LayerPrefix),
+ fold_range({tx, Tx}, Prefix, fun({DbKey, DbPrefix}, Acc) ->
+ {DbName, TimeStamp} = erlfdb_tuple:unpack(DbKey, Prefix),
+ InfoFuture = get_info_future(Tx, DbPrefix),
+ Callback(DbName, TimeStamp, InfoFuture, Acc)
+ end, AccIn, Options).
+
+
get_info(#{} = Db) ->
#{
tx := Tx,
@@ -1186,6 +1255,45 @@ check_db_version(#{} = Db, CheckDbVersion) ->
end.
+soft_delete_db(Db) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+ Timestamp = list_to_binary(fabric2_util:iso8601_timestamp()),
+ DeletedDbKeyTuple = {?DELETED_DBS, DbName, Timestamp},
+ DeletedDbKey = erlfdb_tuple:pack(DeletedDbKeyTuple, LayerPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DeletedDbKey)) of
+ not_found ->
+ erlfdb:set(Tx, DeletedDbKey, DbPrefix),
+ erlfdb:clear(Tx, DbKey),
+ bump_db_version(Db),
+ ok;
+ _Val ->
+ {deletion_frequency_exceeded, DbName}
+ end.
+
+
+hard_delete_db(Db) ->
+ #{
+ name := DbName,
+ tx := Tx,
+ layer_prefix := LayerPrefix,
+ db_prefix := DbPrefix
+ } = ensure_current(Db),
+
+ DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
+
+ erlfdb:clear(Tx, DbKey),
+ erlfdb:clear_range_startswith(Tx, DbPrefix),
+ bump_metadata_version(Tx),
+ ok.
+
+
write_doc_body(#{} = Db0, #doc{} = Doc) ->
#{
tx := Tx
@@ -1514,6 +1622,7 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
EndKey0 = fabric2_util:get_value(end_key, Options, EndKeyGt),
InclusiveEnd = EndKeyGt == undefined,
+ WrapKeys = fabric2_util:get_value(wrap_keys, Options) /= false,
% CouchDB swaps the key meanings based on the direction
% of the fold. FoundationDB does not so we have to
@@ -1527,6 +1636,8 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
StartKey2 = case StartKey1 of
undefined ->
<<RangePrefix/binary, 16#00>>;
+ SK2 when not WrapKeys ->
+ erlfdb_tuple:pack(SK2, RangePrefix);
SK2 ->
erlfdb_tuple:pack({SK2}, RangePrefix)
end,
@@ -1534,9 +1645,14 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
EndKey2 = case EndKey1 of
undefined ->
<<RangePrefix/binary, 16#FF>>;
+ EK2 when Reverse andalso not WrapKeys ->
+ PackedEK = erlfdb_tuple:pack(EK2, RangePrefix),
+ <<PackedEK/binary, 16#FF>>;
EK2 when Reverse ->
PackedEK = erlfdb_tuple:pack({EK2}, RangePrefix),
<<PackedEK/binary, 16#FF>>;
+ EK2 when not WrapKeys ->
+ erlfdb_tuple:pack(EK2, RangePrefix);
EK2 ->
erlfdb_tuple:pack({EK2}, RangePrefix)
end,
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index 97bfedc2c..9b6d18c58 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -40,6 +40,9 @@
encode_all_doc_key/1,
all_docs_view_opts/1,
+ iso8601_timestamp/0,
+ do_recovery/0,
+
pmap/2,
pmap/3
]).
@@ -337,6 +340,19 @@ all_docs_view_opts(#mrargs{} = Args) ->
] ++ StartKeyOpts ++ EndKeyOpts.
+iso8601_timestamp() ->
+ Now = os:timestamp(),
+ {{Year, Month, Date}, {Hour, Minute, Second}} =
+ calendar:now_to_datetime(Now),
+ Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0BZ",
+ io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second]).
+
+
+do_recovery() ->
+ config:get_boolean("couchdb",
+ "enable_database_recovery", false).
+
+
pmap(Fun, Args) ->
pmap(Fun, Args, []).
diff --git a/src/fabric/test/fabric2_db_crud_tests.erl b/src/fabric/test/fabric2_db_crud_tests.erl
index f409389d6..d5025b987 100644
--- a/src/fabric/test/fabric2_db_crud_tests.erl
+++ b/src/fabric/test/fabric2_db_crud_tests.erl
@@ -37,6 +37,9 @@ crud_test_() ->
?TDEF_FE(open_db),
?TDEF_FE(delete_db),
?TDEF_FE(recreate_db),
+ ?TDEF_FE(undelete_db),
+ ?TDEF_FE(remove_deleted_db),
+ ?TDEF_FE(old_db_handle),
?TDEF_FE(list_dbs),
?TDEF_FE(list_dbs_user_fun),
?TDEF_FE(list_dbs_user_fun_partial),
@@ -44,6 +47,10 @@ crud_test_() ->
?TDEF_FE(list_dbs_info_partial),
?TDEF_FE(list_dbs_tx_too_old),
?TDEF_FE(list_dbs_info_tx_too_old),
+ ?TDEF_FE(list_deleted_dbs_info),
+ ?TDEF_FE(list_deleted_dbs_info_user_fun),
+ ?TDEF_FE(list_deleted_dbs_info_user_fun_partial),
+ ?TDEF_FE(list_deleted_dbs_info_with_timestamps),
?TDEF_FE(get_info_wait_retry_on_tx_too_old),
?TDEF_FE(get_info_wait_retry_on_tx_abort)
]
@@ -68,6 +75,7 @@ setup() ->
cleanup(_) ->
+ ok = config:set("couchdb", "enable_database_recovery", "false", false),
fabric2_test_util:tx_too_old_reset_errors(),
reset_fail_erfdb_wait(),
meck:reset([erlfdb]).
@@ -139,6 +147,123 @@ recreate_db(_) ->
?assertError(database_does_not_exist, fabric2_db:open(DbName, BadOpts)).
+undelete_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+
+ OldTS = <<"2020-01-01T12:00:00Z">>,
+ ?assertEqual(not_found, fabric2_db:undelete(DbName, DbName, OldTS, [])),
+ BadDbName = <<"bad_dbname">>,
+ ?assertEqual(not_found,
+ fabric2_db:undelete(BadDbName, BadDbName, Timestamp, [])),
+
+ ok = fabric2_db:undelete(DbName, DbName, Timestamp, []),
+ {ok, AllDbInfos} = fabric2_db:list_dbs_info(),
+ ?assert(is_db_info_member(DbName, AllDbInfos)).
+
+
+remove_deleted_db(_) ->
+ DbName = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName, [])),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(true, ets:member(fabric2_server, DbName)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ ?assertEqual(false, ets:member(fabric2_server, DbName)),
+
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+ OldTS = <<"2020-01-01T12:00:00Z">>,
+ ?assertEqual(not_found,
+ fabric2_db:delete(DbName, [{deleted_at, OldTS}])),
+ BadDbName = <<"bad_dbname">>,
+ ?assertEqual(not_found,
+ fabric2_db:delete(BadDbName, [{deleted_at, Timestamp}])),
+
+ ok = fabric2_db:delete(DbName, [{deleted_at, Timestamp}]),
+ {ok, Infos2} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs = [proplists:get_value(db_name, Info) || Info <- Infos2],
+ ?assert(not lists:member(DbName, DeletedDbs)).
+
+
+old_db_handle(_) ->
+ % db hard deleted
+ DbName1 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName1, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName1, [])),
+ {ok, Db1} = fabric2_db:open(DbName1, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db1)),
+ ?assertEqual(ok, fabric2_db:delete(DbName1, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db1)),
+
+ % db soft deleted
+ DbName2 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName2, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName2, [])),
+ {ok, Db2} = fabric2_db:open(DbName2, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db2)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName2, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db2)),
+
+ % db soft deleted and re-created
+ DbName3 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName3, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName3, [])),
+ {ok, Db3} = fabric2_db:open(DbName3, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db3)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName3, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName3, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db3)),
+
+ % db soft deleted and undeleted
+ DbName4 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName4, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName4, [])),
+ {ok, Db4} = fabric2_db:open(DbName4, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db4)),
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName4, [])),
+ {ok, Infos} = fabric2_db:list_deleted_dbs_info(),
+ [DeletedDbInfo] = [Info || Info <- Infos,
+ DbName4 == proplists:get_value(db_name, Info)
+ ],
+ Timestamp = proplists:get_value(timestamp, DeletedDbInfo),
+ ok = fabric2_db:undelete(DbName4, DbName4, Timestamp, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db4)),
+
+ % db hard deleted and re-created
+ DbName5 = ?tempdb(),
+ ?assertError(database_does_not_exist, fabric2_db:delete(DbName5, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName5, [])),
+ {ok, Db5} = fabric2_db:open(DbName5, []),
+ ?assertMatch({ok, _}, fabric2_db:get_db_info(Db5)),
+ ok = config:set("couchdb", "enable_database_recovery", "false", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName5, [])),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName5, [])),
+ ?assertError(database_does_not_exist, fabric2_db:get_db_info(Db5)).
+
+
list_dbs(_) ->
DbName = ?tempdb(),
AllDbs1 = fabric2_db:list_dbs(),
@@ -295,6 +420,108 @@ list_dbs_info_tx_too_old(_) ->
end, DbNames).
+list_deleted_dbs_info(_) ->
+ DbName = ?tempdb(),
+ AllDbs1 = fabric2_db:list_dbs(),
+
+ ?assert(is_list(AllDbs1)),
+ ?assert(not lists:member(DbName, AllDbs1)),
+
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ AllDbs2 = fabric2_db:list_dbs(),
+ ?assert(lists:member(DbName, AllDbs2)),
+
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ AllDbs3 = fabric2_db:list_dbs(),
+ ?assert(not lists:member(DbName, AllDbs3)),
+ {ok, DeletedDbsInfo} = fabric2_db:list_deleted_dbs_info(),
+ DeletedDbs4 = get_deleted_dbs(DeletedDbsInfo),
+ ?assert(lists:member(DbName, DeletedDbs4)).
+
+
+list_deleted_dbs_info_user_fun(_) ->
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ UserFun = fun(Row, Acc) -> {ok, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_deleted_dbs_info(UserFun, [], []),
+ {ok, DeletedDbsInfo} = fabric2_db:list_deleted_dbs_info(),
+
+ Base = lists:foldl(fun(DbInfo, Acc) ->
+ [{row, DbInfo} | Acc]
+ end, [{meta, []}], DeletedDbsInfo),
+ Expect = lists:reverse(Base, [complete]),
+
+ ?assertEqual(Expect, lists:reverse(UserAcc)).
+
+
+list_deleted_dbs_info_user_fun_partial(_) ->
+ UserFun = fun(Row, Acc) -> {stop, [Row | Acc]} end,
+ {ok, UserAcc} = fabric2_db:list_deleted_dbs_info(UserFun, [], []),
+ ?assertEqual([{meta, []}], UserAcc).
+
+
+list_deleted_dbs_info_with_timestamps(_) ->
+ ok = config:set("couchdb", "enable_database_recovery", "true", false),
+
+ % Cycle our database three times to get multiple entries
+ DbName = ?tempdb(),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ timer:sleep(1100),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+ timer:sleep(1100),
+ ?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
+ ?assertEqual(ok, fabric2_db:delete(DbName, [])),
+
+ UserFun = fun(Row, Acc) ->
+ case Row of
+ {row, Info} -> {ok, [Info | Acc]};
+ _ -> {ok, Acc}
+ end
+ end,
+
+ Options1 = [{start_key, DbName}, {end_key, <<DbName/binary, 255>>}],
+ {ok, Infos1} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options1),
+ TimeStamps1 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos1],
+ ?assertEqual(3, length(TimeStamps1)),
+
+ [FirstTS, MiddleTS, LastTS] = lists:sort(TimeStamps1),
+
+ % Check we can skip over the FirstTS
+ Options2 = [{start_key, [DbName, MiddleTS]}, {end_key, [DbName, LastTS]}],
+ {ok, Infos2} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options2),
+ TimeStamps2 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos2],
+ ?assertEqual(2, length(TimeStamps2)),
+ ?assertEqual([LastTS, MiddleTS], TimeStamps2), % because foldl reverses
+
+    % Check we can end before LastTS
+ Options3 = [{start_key, DbName}, {end_key, [DbName, MiddleTS]}],
+ {ok, Infos3} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options3),
+ TimeStamps3 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos3],
+ ?assertEqual([MiddleTS, FirstTS], TimeStamps3),
+
+ % Check that {dir, rev} works without timestamps
+ Options4 = [{start_key, DbName}, {end_key, DbName}, {dir, rev}],
+ {ok, Infos4} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options4),
+ TimeStamps4 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos4],
+ ?assertEqual([FirstTS, MiddleTS, LastTS], TimeStamps4),
+
+ % Check that reverse with keys returns correctly
+ Options5 = [
+ {start_key, [DbName, MiddleTS]},
+ {end_key, [DbName, FirstTS]},
+ {dir, rev}
+ ],
+ {ok, Infos5} = fabric2_db:list_deleted_dbs_info(UserFun, [], Options5),
+ TimeStamps5 = [fabric2_util:get_value(timestamp, Info) || Info <- Infos5],
+ ?assertEqual([FirstTS, MiddleTS], TimeStamps5).
+
+
get_info_wait_retry_on_tx_too_old(_) ->
DbName = ?tempdb(),
?assertMatch({ok, _}, fabric2_db:create(DbName, [])),
@@ -382,3 +609,9 @@ is_db_info_member(DbName, [DbInfo | RestInfos]) ->
_E ->
is_db_info_member(DbName, RestInfos)
end.
+
+get_deleted_dbs(DeletedDbInfos) ->
+ lists:foldl(fun(DbInfo, Acc) ->
+ DbName = fabric2_util:get_value(db_name, DbInfo),
+ [DbName | Acc]
+ end, [], DeletedDbInfos).