From 8b682a14fa8729a5152362ffb37d54b8deeddfd6 Mon Sep 17 00:00:00 2001
From: "Paul J. Davis"
Date: Wed, 1 Feb 2017 15:15:09 -0600
Subject: Remove public access to the db record

This completes the removal of public access to the db record from the
couch application. The large majority of these changes replace direct
access to the #db.name, #db.main_pid, and #db.update_seq fields with
calls to the corresponding couch_db accessor functions.

COUCHDB-3288
---
 src/chttpd/src/chttpd_db.erl                       |  21 +-
 src/chttpd/src/chttpd_external.erl                 |  22 +-
 src/chttpd/src/chttpd_show.erl                     |   3 +-
 src/couch/include/couch_db.hrl                     |  27 --
 src/couch/src/couch_auth_cache.erl                 |  17 +-
 src/couch/src/couch_changes.erl                    |  56 ++--
 src/couch/src/couch_compaction_daemon.erl          |   4 +-
 src/couch/src/couch_db.erl                         |  55 ++++
 src/couch/src/couch_db_int.hrl                     |  38 +++
 src/couch/src/couch_db_plugin.erl                  |   6 +-
 src/couch/src/couch_db_updater.erl                 |   1 +
 src/couch/src/couch_httpd_db.erl                   |  12 +-
 src/couch/src/couch_users_db.erl                   |   8 +-
 src/couch/src/couch_util.erl                       |  15 +-
 src/couch/test/couch_auth_cache_tests.erl          |   2 +-
 src/couch/test/couch_changes_tests.erl             |   2 +-
 src/couch/test/couch_db_plugin_tests.erl           |  13 +-
 src/couch/test/couch_server_tests.erl              |  11 +-
 src/couch/test/couchdb_compaction_daemon_tests.erl |   2 +-
 src/couch/test/couchdb_views_tests.erl             |  21 +-
 src/couch_index/src/couch_index_server.erl         |  18 +-
 src/couch_index/src/couch_index_util.erl           |   2 +-
 .../test/couch_index_compaction_tests.erl          |   3 +-
 src/couch_mrview/src/couch_mrview.erl              |  14 +-
 src/couch_mrview/src/couch_mrview_compactor.erl    |   7 +-
 src/couch_mrview/src/couch_mrview_http.erl         |  10 +-
 src/couch_mrview/src/couch_mrview_show.erl         |  16 +-
 .../test/couch_mrview_all_docs_tests.erl           |   2 +-
 .../test/couch_mrview_changes_since_tests.erl      |   2 +-
 .../test/couch_mrview_collation_tests.erl          |   2 +-
 .../test/couch_mrview_compact_tests.erl            |   2 +-
 .../test/couch_mrview_ddoc_validation_tests.erl    |   2 +-
 .../test/couch_mrview_index_changes_tests.erl      |   2 +-
 .../test/couch_mrview_index_info_tests.erl         |   2 +-
 .../test/couch_mrview_map_views_tests.erl          |   2 +-
 .../test/couch_mrview_red_views_tests.erl          |   2 +-
 src/couch_replicator/src/couch_replicator.erl      |  33 ++-
 .../src/couch_replicator_api_wrap.erl              |  27 +-
 .../src/couch_replicator_manager.erl               |   8 +-
 .../src/couch_replicator_utils.erl                 |  40 +--
 .../src/couch_replicator_worker.erl                |  56 ++--
 .../test/couch_replicator_compact_tests.erl        |  27 +-
 src/fabric/include/couch_db_tmp.hrl                | 296 ---------------------
 src/fabric/src/fabric.erl                          |  12 +-
 src/fabric/src/fabric_rpc.erl                      | 113 ++------
 src/fabric/src/fabric_util.erl                     |   3 +-
 src/mango/src/mango_crud.erl                       |   2 +-
 src/mango/src/mango_cursor_text.erl                |   4 +-
 src/mango/src/mango_httpd.erl                      |   3 +-
 src/mango/src/mango_idx.erl                        |   6 +-
 src/mango/src/mango_idx_text.erl                   |   9 +-
 src/mem3/src/mem3.erl                              |  17 +-
 src/mem3/src/mem3_httpd.erl                        |   4 +-
 src/mem3/src/mem3_nodes.erl                        |  10 +-
 src/mem3/src/mem3_rep.erl                          |  12 +-
 src/mem3/src/mem3_rpc.erl                          |   4 +-
 src/mem3/src/mem3_shards.erl                       |   4 +-
 57 files changed, 440 insertions(+), 674 deletions(-)
 create mode 100644 src/couch/src/couch_db_int.hrl
 delete mode 100644 src/fabric/include/couch_db_tmp.hrl

diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 37e466908..aed649dc7 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -84,7 +84,7 @@ handle_changes_req1(#httpd{}=Req, Db) ->
     #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
     ChangesArgs = Args0#changes_args{
         filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
-        db_open_options = [{user_ctx, Db#db.user_ctx}]
+        
db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}] }, Max = chttpd:chunked_response_buffer_size(), case ChangesArgs#changes_args.feed of @@ -253,7 +253,7 @@ handle_view_cleanup_req(Req, Db) -> handle_design_req(#httpd{ path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest] }=Req, Db) -> - DbName = mem3:dbname(Db#db.name), + DbName = mem3:dbname(couch_db:name(Db)), case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of {ok, DDoc} -> Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3), @@ -309,7 +309,8 @@ delete_db_req(#httpd{}=Req, DbName) -> do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) -> fabric:get_security(DbName, [{user_ctx,Ctx}]), % calls check_is_reader - Fun(Req, #db{name=DbName, user_ctx=Ctx}). + {ok, Db} = couch_db:clustered_db(DbName, Ctx), + Fun(Req, Db). db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) -> % measure the time required to generate the etag, see if it's worth it @@ -767,16 +768,17 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) -> } = parse_doc_query(Req), couch_doc:validate_docid(DocId), + DbName = couch_db:name(Db), W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))), Options = [{user_ctx,Ctx}, {w,W}], - Loc = absolute_uri(Req, [$/, couch_util:url_encode(Db#db.name), + Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName), $/, couch_util:url_encode(DocId)]), RespHeaders = [{"Location", Loc}], case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of ("multipart/related;" ++ _) = ContentType -> couch_httpd:check_max_request_length(Req), - couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(Db#db.name), DocId)), + couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)), {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType, fun() -> receive_request_data(Req) end), Doc = couch_doc_from_req(Req, DocId, Doc0), @@ -833,8 +835,9 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) -> HttpCode = 202 end, % respond + DbName = couch_db:name(Db), {PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}), - Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(Db#db.name) ++ "/" ++ couch_util:url_encode(TargetDocId)), + Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)), send_json(Req, HttpCode, [{"Location", Loc}, {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}], @@ -1057,8 +1060,8 @@ couch_doc_from_req(Req, DocId, Json) -> % couch_doc_open(Db, DocId) -> % couch_doc_open(Db, DocId, nil, []). -couch_doc_open(#db{} = Db, DocId, Rev, Options0) -> - Options = [{user_ctx, Db#db.user_ctx} | Options0], +couch_doc_open(Db, DocId, Rev, Options0) -> + Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0], case Rev of nil -> % open most recent rev case fabric:open_doc(Db, DocId, Options) of @@ -1258,7 +1261,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa HttpCode = 202 end, erlang:put(mochiweb_request_recv, true), - #db{name=DbName} = Db, + DbName = couch_db:name(Db), {Status, Headers} = case Method of 'DELETE' -> diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl index 4abeecb37..64664b98e 100644 --- a/src/chttpd/src/chttpd_external.erl +++ b/src/chttpd/src/chttpd_external.erl @@ -120,16 +120,22 @@ json_req_obj_field(<<"secObj">>, #httpd{user_ctx=UserCtx}, Db, _DocId) -> get_db_security(Db, UserCtx). 
-get_db_info(#db{main_pid = nil} = Db) -> - fabric:get_db_info(Db); -get_db_info(#db{} = Db) -> - couch_db:get_db_info(Db). +get_db_info(Db) -> + case couch_db:is_clustered(Db) of + true -> + fabric:get_db_info(Db); + false -> + couch_db:get_db_info(Db) + end. -get_db_security(#db{main_pid = nil}=Db, #user_ctx{}) -> - fabric:get_security(Db); -get_db_security(#db{}=Db, #user_ctx{}) -> - couch_db:get_security(Db). +get_db_security(Db, #user_ctx{}) -> + case couch_db:is_clustered(Db) of + true -> + fabric:get_security(Db); + false -> + couch_db:get_security(Db) + end. to_json_terms(Data) -> diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl index 49fed7b8d..0b45495d0 100644 --- a/src/chttpd/src/chttpd_show.erl +++ b/src/chttpd/src/chttpd_show.erl @@ -196,7 +196,8 @@ handle_view_list_req(Req, _Db, _DDoc) -> handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) -> %% Will throw an exception if the _list handler is missing couch_util:get_nested_json_value(DDoc#doc.body, [<<"lists">>, LName]), - {ok, VDoc} = ddoc_cache:open(Db#db.name, <<"_design/", ViewDesignName/binary>>), + DbName = couch_db:name(Db), + {ok, VDoc} = ddoc_cache:open(DbName, <<"_design/", ViewDesignName/binary>>), CB = fun couch_mrview_show:list_cb/2, QueryArgs = couch_mrview_http:parse_params(Req, Keys), Options = [{user_ctx, Req#httpd.user_ctx}], diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl index e7cd85d09..5abb31660 100644 --- a/src/couch/include/couch_db.hrl +++ b/src/couch/include/couch_db.hrl @@ -128,33 +128,6 @@ handler }). --record(db, { - main_pid = nil, - compactor_pid = nil, - instance_start_time, % number of microsecs since jan 1 1970 as a binary string - fd, - fd_monitor, - header = couch_db_header:new(), - committed_update_seq, - id_tree, - seq_tree, - local_tree, - update_seq, - name, - filepath, - validate_doc_funs = undefined, - security = [], - security_ptr = nil, - user_ctx = #user_ctx{}, - waiting_delayed_commit = nil, - revs_limit = 1000, - fsync_options = [], - options = [], - compression, - before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc - after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc -}). - -record(view_fold_helper_funs, { reduce_count, passed_end, diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl index 9b00a9d84..54a67947b 100644 --- a/src/couch/src/couch_auth_cache.erl +++ b/src/couch/src/couch_auth_cache.erl @@ -289,8 +289,9 @@ reinit_cache(#state{db_mon_ref = Ref, closed = Closed} = State) -> true = ets:insert(?STATE, {auth_db_name, AuthDbName}), AuthDb = open_auth_db(), true = ets:insert(?STATE, {auth_db, AuthDb}), + DbPid = couch_db:get_pid(AuthDb), NewState#state{closed = [Ref|Closed], - db_mon_ref = erlang:monitor(process, AuthDb#db.main_pid)}. + db_mon_ref = erlang:monitor(process, DbPid)}. 
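
The auth-cache hunk above shows the standard replacement for reads of
#db.main_pid: fetch the pid through the new couch_db:get_pid/1 accessor,
then monitor it. A minimal sketch of the idiom, assuming only that
accessor (monitor_db_pid/1 is a hypothetical helper, not part of this
patch; couch_db:monitor/1, also exported below, wraps the same thing):

    %% Monitor a database's main process without touching the private
    %% #db{} record.
    monitor_db_pid(Db) ->
        DbPid = couch_db:get_pid(Db),
        erlang:monitor(process, DbPid).
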
add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) -> @@ -331,13 +332,15 @@ refresh_entries(AuthDb) -> nil -> ok; AuthDb2 -> - case AuthDb2#db.update_seq > AuthDb#db.update_seq of + AuthDbSeq = couch_db:get_update_seq(AuthDb), + AuthDb2Seq = couch_db:get_update_seq(AuthDb2), + case AuthDb2Seq > AuthDbSeq of true -> {ok, _, _} = couch_db:enum_docs_since( AuthDb2, - AuthDb#db.update_seq, + AuthDbSeq, fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end, - AuthDb#db.update_seq, + AuthDbSeq, [] ), true = ets:insert(?STATE, {auth_db, AuthDb2}); @@ -395,7 +398,9 @@ cache_needs_refresh() -> nil -> false; AuthDb2 -> - AuthDb2#db.update_seq > AuthDb#db.update_seq + AuthDbSeq = couch_db:get_update_seq(AuthDb), + AuthDb2Seq = couch_db:get_update_seq(AuthDb2), + AuthDb2Seq > AuthDbSeq end end, false @@ -416,7 +421,7 @@ exec_if_auth_db(Fun) -> exec_if_auth_db(Fun, DefRes) -> case ets:lookup(?STATE, auth_db) of - [{auth_db, #db{} = AuthDb}] -> + [{auth_db, AuthDb}] -> Fun(AuthDb); _ -> DefRes diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl index 52ff39ded..ea7f65c63 100644 --- a/src/couch/src/couch_changes.erl +++ b/src/couch/src/couch_changes.erl @@ -78,9 +78,10 @@ handle_changes(Args1, Req, Db0, Type) -> _ -> {false, undefined, undefined} end, + DbName = couch_db:name(Db0), {StartListenerFun, View} = if UseViewChanges -> {ok, {_, View0, _}, _, _} = couch_mrview_util:get_view( - Db0#db.name, DDocName, ViewName, #mrargs{}), + DbName, DDocName, ViewName, #mrargs{}), case View0#mrview.seq_btree of #btree{} -> ok; @@ -89,14 +90,14 @@ handle_changes(Args1, Req, Db0, Type) -> end, SNFun = fun() -> couch_event:link_listener( - ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, Db0#db.name}] + ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, DbName}] ) end, {SNFun, View0}; true -> SNFun = fun() -> couch_event:link_listener( - ?MODULE, handle_db_event, self(), [{dbname, Db0#db.name}] + ?MODULE, handle_db_event, self(), [{dbname, DbName}] ) end, {SNFun, undefined} @@ -111,7 +112,7 @@ handle_changes(Args1, Req, Db0, Type) -> end, View2 = if UseViewChanges -> {ok, {_, View1, _}, _, _} = couch_mrview_util:get_view( - Db0#db.name, DDocName, ViewName, #mrargs{}), + DbName, DDocName, ViewName, #mrargs{}), View1; true -> undefined @@ -219,11 +220,11 @@ configure_filter("_view", Style, Req, Db) -> catch _:_ -> view end, - case Db#db.id_tree of - undefined -> + case couch_db:is_clustered(Db) of + true -> DIR = fabric_util:doc_id_and_rev(DDoc), {fetch, FilterType, Style, DIR, VName}; - _ -> + false -> {FilterType, Style, DDoc, VName} end; [] -> @@ -242,11 +243,11 @@ configure_filter(FilterName, Style, Req, Db) -> [DName, FName] -> {ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>), check_member_exists(DDoc, [<<"filters">>, FName]), - case Db#db.id_tree of - undefined -> + case couch_db:is_clustered(Db) of + true -> DIR = fabric_util:doc_id_and_rev(DDoc), {fetch, custom, Style, Req, DIR, FName}; - _ -> + false-> {custom, Style, Req, DDoc, FName} end; @@ -395,15 +396,19 @@ check_fields(_Fields) -> throw({bad_request, "Selector error: fields must be JSON array"}). 
-open_ddoc(#db{name=DbName, id_tree=undefined}, DDocId) -> - case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of - {ok, _} = Resp -> Resp; - Else -> throw(Else) - end; open_ddoc(Db, DDocId) -> - case couch_db:open_doc(Db, DDocId, [ejson_body]) of - {ok, _} = Resp -> Resp; - Else -> throw(Else) + DbName = couch_db:name(Db), + case couch_db:is_clustered(Db) of + true -> + case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of + {ok, _} = Resp -> Resp; + Else -> throw(Else) + end; + false -> + case couch_db:open_doc(Db, DDocId, [ejson_body]) of + {ok, _} = Resp -> Resp; + Else -> throw(Else) + end end. @@ -566,7 +571,7 @@ can_optimize(_, _) -> send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) -> - Lookups = couch_btree:lookup(Db#db.id_tree, DocIds), + Lookups = couch_db:get_full_doc_infos(Db, DocIds), FullInfos = lists:foldl(fun ({ok, FDI}, Acc) -> [FDI | Acc]; (not_found, Acc) -> Acc @@ -575,11 +580,9 @@ send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) -> send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) -> - FoldFun = fun(FullDocInfo, _, Acc) -> - {ok, [FullDocInfo | Acc]} - end, + FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end, KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}], - {ok, _, FullInfos} = couch_btree:fold(Db#db.id_tree, FoldFun, [], KeyOpts), + {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], KeyOpts), send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0). @@ -640,8 +643,8 @@ keep_sending_changes(Args, Acc0, FirstRound) -> true -> case wait_updated(Timeout, TimeoutFun, UserAcc2) of {updated, UserAcc4} -> - DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions], - case couch_db:open(Db#db.name, DbOptions1) of + DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions], + case couch_db:open(couch_db:name(Db), DbOptions1) of {ok, Db2} -> keep_sending_changes( Args#changes_args{limit=NewLimit}, @@ -665,7 +668,8 @@ keep_sending_changes(Args, Acc0, FirstRound) -> maybe_refresh_view(_, undefined, undefined) -> undefined; maybe_refresh_view(Db, DDocName, ViewName) -> - {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(Db#db.name, DDocName, ViewName, #mrargs{}), + DbName = couch_db:name(Db), + {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(DbName, DDocName, ViewName, #mrargs{}), View. 
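
send_changes_design_docs/6 above now walks the by-id index through
couch_db:fold_docs/4 (added to couch_db.erl below) instead of folding
#db.id_tree directly. A hedged sketch of the same scan as a standalone
helper (design_doc_infos/1 is hypothetical):

    %% Collect #full_doc_info{} records for all design docs; the fold
    %% fun returns {ok, Acc} to keep folding.
    design_doc_infos(Db) ->
        FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
        Opts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
        {ok, Infos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
        lists:reverse(Infos).
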
end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) -> diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl index 8f95eb21e..f3b646d29 100644 --- a/src/couch/src/couch_compaction_daemon.erl +++ b/src/couch/src/couch_compaction_daemon.erl @@ -319,7 +319,7 @@ can_db_compact(#config{db_frag = Threshold} = Config, Db) -> {Frag, SpaceRequired} = frag(DbInfo), couch_log:debug("Fragmentation for database `~s` is ~p%, estimated" " space for compaction is ~p bytes.", - [Db#db.name, Frag, SpaceRequired]), + [couch_db:name(Db), Frag, SpaceRequired]), case check_frag(Threshold, Frag) of false -> false; @@ -332,7 +332,7 @@ can_db_compact(#config{db_frag = Threshold} = Config, Db) -> couch_log:warning("Compaction daemon - skipping database `~s` " "compaction: the estimated necessary disk space is about ~p" " bytes but the currently available disk space is ~p bytes.", - [Db#db.name, SpaceRequired, Free]), + [couch_db:name(Db), SpaceRequired, Free]), false end end diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 3a29a3d63..75fc7306e 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -22,6 +22,9 @@ incref/1, decref/1, + clustered_db/2, + clustered_db/3, + monitor/1, monitored_by/1, is_idle/1, @@ -32,21 +35,28 @@ name/1, compression/1, + get_after_doc_read_fun/1, + get_before_doc_update_fun/1, get_committed_update_seq/1, get_compacted_seq/1, + get_compactor_pid/1, get_db_info/1, get_doc_count/1, get_epochs/1, + get_filepath/1, get_instance_start_time/1, get_last_purged/1, get_pid/1, get_revs_limit/1, get_security/1, get_update_seq/1, + get_user_ctx/1, get_uuid/1, get_purge_seq/1, + is_db/1, is_system_db/1, + is_clustered/1, increment_update_seq/1, set_revs_limit/2, @@ -80,6 +90,8 @@ with_stream/3, + fold_docs/4, + fold_local_docs/4, enum_docs/4, enum_docs_reduce_to_count/1, @@ -113,6 +125,7 @@ -include_lib("couch/include/couch_db.hrl"). +-include("couch_db_int.hrl"). -define(DBNAME_REGEX, "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex @@ -187,6 +200,12 @@ reopen(#db{main_pid = Pid, fd = Fd, fd_monitor = OldRef, user_ctx = UserCtx}) -> {ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}} end. +clustered_db(DbName, UserCtx) -> + clustered_db(DbName, UserCtx, []). + +clustered_db(DbName, UserCtx, SecProps) -> + {ok, #db{name = DbName, user_ctx = UserCtx, security = SecProps}}. + incref(#db{fd = Fd} = Db) -> Ref = erlang:monitor(process, Fd), {ok, Db#db{fd_monitor = Ref}}. @@ -195,9 +214,19 @@ decref(#db{fd_monitor = Monitor}) -> erlang:demonitor(Monitor, [flush]), ok. +is_db(#db{}) -> + true; +is_db(_) -> + false. + is_system_db(#db{options = Options}) -> lists:member(sys_db, Options). +is_clustered(#db{main_pid = nil}) -> + true; +is_clustered(#db{}) -> + false. + ensure_full_commit(#db{main_pid=Pid, instance_start_time=StartTime}) -> ok = gen_server:call(Pid, full_commit, infinity), {ok, StartTime}. @@ -378,12 +407,21 @@ increment_update_seq(#db{main_pid=Pid}) -> purge_docs(#db{main_pid=Pid}, IdsRevs) -> gen_server:call(Pid, {purge_docs, IdsRevs}). +get_after_doc_read_fun(#db{after_doc_read = Fun}) -> + Fun. + +get_before_doc_update_fun(#db{before_doc_update = Fun}) -> + Fun. + get_committed_update_seq(#db{committed_update_seq=Seq}) -> Seq. get_update_seq(#db{update_seq=Seq})-> Seq. +get_user_ctx(#db{user_ctx = UserCtx}) -> + UserCtx. + get_purge_seq(#db{}=Db) -> couch_db_header:purge_seq(Db#db.header). @@ -410,12 +448,18 @@ get_epochs(#db{}=Db) -> validate_epochs(Epochs), Epochs. 
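
The clustered_db/2 and is_clustered/1 functions added above let callers
such as chttpd_db:do_db_req/2 obtain a fabric-style handle without
constructing a #db{} record themselves. A small usage sketch, assuming
only the exports introduced in this patch (#user_ctx{} comes from
couch_db.hrl):

    %% A clustered handle carries a name and user_ctx but no main_pid,
    %% so is_clustered/1 distinguishes it from a locally opened shard.
    {ok, CDb} = couch_db:clustered_db(<<"mydb">>, #user_ctx{name = <<"bob">>}),
    true = couch_db:is_db(CDb),
    true = couch_db:is_clustered(CDb),
    <<"mydb">> = couch_db:name(CDb).
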
+get_filepath(#db{filepath = FilePath}) -> + FilePath. + get_instance_start_time(#db{instance_start_time = IST}) -> IST. get_compacted_seq(#db{}=Db) -> couch_db_header:compacted_seq(Db#db.header). +get_compactor_pid(#db{compactor_pid = Pid}) -> + Pid. + get_db_info(Db) -> #db{fd=Fd, header=Header, @@ -1365,6 +1409,17 @@ enum_docs_since(Db, SinceSeq, InFun, Acc, Options) -> [{start_key, SinceSeq + 1} | Options]), {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}. + +fold_docs(Db, InFun, InAcc, Opts) -> + Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end, + {ok, _, AccOut} = couch_btree:fold(Db#db.id_tree, Wrapper, InAcc, Opts), + {ok, AccOut}. + +fold_local_docs(Db, InFun, InAcc, Opts) -> + Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end, + {ok, _, AccOut} = couch_btree:fold(Db#db.local_tree, Wrapper, InAcc, Opts), + {ok, AccOut}. + enum_docs(Db, InFun, InAcc, Options0) -> {NS, Options} = extract_namespace(Options0), enum_docs(Db, NS, InFun, InAcc, Options). diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl new file mode 100644 index 000000000..fc739b781 --- /dev/null +++ b/src/couch/src/couch_db_int.hrl @@ -0,0 +1,38 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-record(db, { + main_pid = nil, + compactor_pid = nil, + instance_start_time, % number of microsecs since jan 1 1970 as a binary string + fd, + fd_monitor, + header = couch_db_header:new(), + committed_update_seq, + id_tree, + seq_tree, + local_tree, + update_seq, + name, + filepath, + validate_doc_funs = undefined, + security = [], + security_ptr = nil, + user_ctx = #user_ctx{}, + waiting_delayed_commit = nil, + revs_limit = 1000, + fsync_options = [], + options = [], + compression, + before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc + after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc +}). \ No newline at end of file diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl index 774e9e094..740b8121b 100644 --- a/src/couch/src/couch_db_plugin.erl +++ b/src/couch/src/couch_db_plugin.erl @@ -32,13 +32,15 @@ validate_dbname(DbName, Normalized, Default) -> maybe_handle(validate_dbname, [DbName, Normalized], Default). -before_doc_update(#db{before_doc_update = Fun} = Db, Doc0) -> +before_doc_update(Db, Doc0) -> + Fun = couch_db:get_before_doc_update_fun(Db), case with_pipe(before_doc_update, [Doc0, Db]) of [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db); [Doc1, _Db] -> Doc1 end. -after_doc_read(#db{after_doc_read = Fun} = Db, Doc0) -> +after_doc_read(Db, Doc0) -> + Fun = couch_db:get_after_doc_read_fun(Db), case with_pipe(after_doc_read, [Doc0, Db]) of [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db); [Doc1, _Db] -> Doc1 diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index 270fffe46..8f6fc352f 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -20,6 +20,7 @@ -export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]). 
-include_lib("couch/include/couch_db.hrl"). +-include("couch_db_int.hrl"). -record(comp_header, { db_header, diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl index e1af1bfdc..1198a6786 100644 --- a/src/couch/src/couch_httpd_db.erl +++ b/src/couch/src/couch_httpd_db.erl @@ -70,7 +70,8 @@ handle_changes_req(#httpd{method='GET'}=Req, Db, ChangesArgs, ChangesFun) -> handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) -> couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST"). -handle_changes_req1(Req, #db{name=DbName}=Db, ChangesArgs, ChangesFun) -> +handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) -> + DbName = couch_db:name(Db), AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")), case AuthDbName of DbName -> @@ -287,7 +288,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) - RequiredSeq > CommittedSeq -> couch_db:ensure_full_commit(Db); true -> - {ok, Db#db.instance_start_time} + {ok, couch_db:get_instance_start_time(Db)} end end, send_json(Req, 201, {[ @@ -733,7 +734,8 @@ update_doc_result_to_json(DocId, Error) -> update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) -> - Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(Db#db.name) ++ "/" ++ couch_util:url_encode(DocId)), + DbName = couch_db:name(Db), + Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)), update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]); update_doc(Req, Db, DocId, Doc) -> update_doc(Req, Db, DocId, Doc, []). @@ -1033,7 +1035,7 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN []; _ -> [{"Location", absolute_uri(Req, "/" ++ - couch_util:url_encode(Db#db.name) ++ "/" ++ + couch_util:url_encode(couch_db:name(Db)) ++ "/" ++ couch_util:url_encode(DocId) ++ "/" ++ couch_util:url_encode(FileName) )}] @@ -1145,7 +1147,7 @@ parse_changes_query(Req, Db) -> {"descending", "true"} -> Args#changes_args{dir=rev}; {"since", "now"} -> - UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) -> + UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) -> couch_db:get_update_seq(WDb) end), Args#changes_args{since=UpdateSeq}; diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl index 6f7b9af73..c7b41f1fc 100644 --- a/src/couch/src/couch_users_db.erl +++ b/src/couch/src/couch_users_db.erl @@ -39,8 +39,8 @@ % -> 404 // Not Found % Else % -> save_doc -before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) -> - #user_ctx{name=Name} = UserCtx, +before_doc_update(Doc, Db) -> + #user_ctx{name=Name} = couch_db:get_user_ctx(Db), DocName = get_doc_name(Doc), case (catch couch_db:check_is_admin(Db)) of ok -> @@ -108,8 +108,8 @@ after_doc_read(#doc{id = <>} = Doc, Db) -> throw({forbidden, <<"Only administrators can view design docs in the users database.">>}) end; -after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) -> - #user_ctx{name=Name} = UserCtx, +after_doc_read(Doc, Db) -> + #user_ctx{name=Name} = couch_db:get_user_ctx(Db), DocName = get_doc_name(Doc), case (catch couch_db:check_is_admin(Db)) of ok -> diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl index 6001ae2e4..d688c126f 100644 --- a/src/couch/src/couch_util.erl +++ b/src/couch/src/couch_util.erl @@ -198,7 +198,9 @@ json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) -> json_apply_field({Key, NewValue}, [], Acc) -> {[{Key, NewValue}|Acc]}. 
-json_user_ctx(#db{name=ShardName, user_ctx=Ctx}) -> +json_user_ctx(Db) -> + ShardName = couch_db:name(Db), + Ctx = couch_db:get_user_ctx(Db), {[{<<"db">>, mem3:dbname(ShardName)}, {<<"name">>,Ctx#user_ctx.name}, {<<"roles">>,Ctx#user_ctx.roles}]}. @@ -455,9 +457,7 @@ encode_doc_id(Id) -> url_encode(Id). -with_db(Db, Fun) when is_record(Db, db) -> - Fun(Db); -with_db(DbName, Fun) -> +with_db(DbName, Fun) when is_binary(DbName) -> case couch_db:open_int(DbName, [?ADMIN_CTX]) of {ok, Db} -> try @@ -467,6 +467,13 @@ with_db(DbName, Fun) -> end; Else -> throw(Else) + end; +with_db(Db, Fun) -> + case couch_db:is_db(Db) of + true -> + Fun(Db); + false -> + erlang:error({invalid_db, Db}) end. rfc1123_date() -> diff --git a/src/couch/test/couch_auth_cache_tests.erl b/src/couch/test/couch_auth_cache_tests.erl index 76179dea0..08aecd156 100644 --- a/src/couch/test/couch_auth_cache_tests.erl +++ b/src/couch/test/couch_auth_cache_tests.erl @@ -265,7 +265,7 @@ hash_password(Password) -> shutdown_db(DbName) -> {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), ok = couch_db:close(AuthDb), - couch_util:shutdown_sync(AuthDb#db.main_pid), + couch_util:shutdown_sync(couch_db:get_pid(AuthDb)), ok = timer:sleep(1000). get_doc_rev(DbName, UserName) -> diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/couch_changes_tests.erl index 3c0e5f69a..494d90fe3 100644 --- a/src/couch/test/couch_changes_tests.erl +++ b/src/couch/test/couch_changes_tests.erl @@ -645,7 +645,7 @@ should_filter_by_user_ctx({DbName, _}) -> ]}), ChArgs = #changes_args{filter = "app/valid"}, UserCtx = #user_ctx{name = <<"doc3">>, roles = []}, - DbRec = #db{name = DbName, user_ctx = UserCtx}, + {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx), Req = {json_req, {[{ <<"userCtx">>, couch_util:json_user_ctx(DbRec) }]}}, diff --git a/src/couch/test/couch_db_plugin_tests.erl b/src/couch/test/couch_db_plugin_tests.erl index ea9b230b1..94dd3dfa5 100644 --- a/src/couch/test/couch_db_plugin_tests.erl +++ b/src/couch/test/couch_db_plugin_tests.erl @@ -43,6 +43,7 @@ data_providers() -> []. data_subscriptions() -> []. processes() -> []. notify(_, _, _) -> ok. +fake_db() -> element(2, couch_db:clustered_db(fake, totes_fake)). setup() -> couch_tests:setup([ @@ -133,33 +134,33 @@ validate_dbname_pass() -> before_doc_update_match() -> ?assertMatch( {true, [before_doc_update, doc]}, - couch_db_plugin:before_doc_update(#db{}, {true, [doc]})). + couch_db_plugin:before_doc_update(fake_db(), {true, [doc]})). before_doc_update_no_match() -> ?assertMatch( {false, [doc]}, - couch_db_plugin:before_doc_update(#db{}, {false, [doc]})). + couch_db_plugin:before_doc_update(fake_db(), {false, [doc]})). before_doc_update_throw() -> ?assertThrow( before_doc_update, - couch_db_plugin:before_doc_update(#db{}, {fail, [doc]})). + couch_db_plugin:before_doc_update(fake_db(), {fail, [doc]})). after_doc_read_match() -> ?assertMatch( {true, [after_doc_read, doc]}, - couch_db_plugin:after_doc_read(#db{}, {true, [doc]})). + couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})). after_doc_read_no_match() -> ?assertMatch( {false, [doc]}, - couch_db_plugin:after_doc_read(#db{}, {false, [doc]})). + couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})). after_doc_read_throw() -> ?assertThrow( after_doc_read, - couch_db_plugin:after_doc_read(#db{}, {fail, [doc]})). + couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})). 
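
couch_util:with_db/2, rewritten above, now accepts either a binary name
or an opaque handle vetted by couch_db:is_db/1, and raises
{invalid_db, Db} for anything else. A hedged usage sketch:

    %% Both calls run the fun against a local handle; the first opens
    %% (and closes) the database by name under an admin context.
    Seq1 = couch_util:with_db(<<"dbname">>, fun couch_db:get_update_seq/1),
    {ok, Db} = couch_db:open_int(<<"dbname">>, []),
    Seq2 = couch_util:with_db(Db, fun couch_db:get_update_seq/1),
    ok = couch_db:close(Db).
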
validate_docid_match() -> diff --git a/src/couch/test/couch_server_tests.erl b/src/couch/test/couch_server_tests.erl index c8f8381d7..4fd7ff2ab 100644 --- a/src/couch/test/couch_server_tests.erl +++ b/src/couch/test/couch_server_tests.erl @@ -32,8 +32,7 @@ setup(_) -> setup(). teardown(Db) -> - (catch couch_db:close(Db)), - (catch file:delete(Db#db.filepath)). + (catch couch_db:close(Db)). teardown(rename, Db) -> config:set("couchdb", "enable_database_recovery", "false", false), @@ -61,7 +60,9 @@ make_test_case(Mod, Funs) -> {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} }. -should_rename_on_delete(_, #db{filepath = Origin, name = DbName}) -> +should_rename_on_delete(_, Db) -> + DbName = couch_db:name(Db), + Origin = couch_db:get_filepath(Db), ?_test(begin ?assert(filelib:is_regular(Origin)), ?assertMatch(ok, couch_server:delete(DbName, [])), @@ -74,7 +75,9 @@ should_rename_on_delete(_, #db{filepath = Origin, name = DbName}) -> ?assert(filelib:is_regular(Renamed)) end). -should_delete(_, #db{filepath = Origin, name = DbName}) -> +should_delete(_, Db) -> + DbName = couch_db:name(Db), + Origin = couch_db:get_filepath(Db), ?_test(begin ?assert(filelib:is_regular(Origin)), ?assertMatch(ok, couch_server:delete(DbName, [])), diff --git a/src/couch/test/couchdb_compaction_daemon_tests.erl b/src/couch/test/couchdb_compaction_daemon_tests.erl index 25d9b131e..70e505e91 100644 --- a/src/couch/test/couchdb_compaction_daemon_tests.erl +++ b/src/couch/test/couchdb_compaction_daemon_tests.erl @@ -182,7 +182,7 @@ update(DbName) -> lists:foreach(fun(_) -> Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}), {ok, _} = couch_db:update_docs(Db, [Doc]), - query_view(Db#db.name) + query_view(couch_db:name(Db)) end, lists:seq(1, 200)), couch_db:close(Db). diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/couchdb_views_tests.erl index f1fddfc1b..02e9d7211 100644 --- a/src/couch/test/couchdb_views_tests.erl +++ b/src/couch/test/couchdb_views_tests.erl @@ -340,7 +340,7 @@ couchdb_1283() -> ]}), {ok, _} = couch_db:update_doc(MDb1, DDoc, []), ok = populate_db(MDb1, 100, 100), - query_view(MDb1#db.name, "foo", "foo"), + query_view(couch_db:name(MDb1), "foo", "foo"), ok = couch_db:close(MDb1), {ok, Db1} = couch_db:create(?tempdb(), [?ADMIN_CTX]), @@ -350,8 +350,8 @@ couchdb_1283() -> {ok, Db3} = couch_db:create(?tempdb(), [?ADMIN_CTX]), ok = couch_db:close(Db3), - Writer1 = spawn_writer(Db1#db.name), - Writer2 = spawn_writer(Db2#db.name), + Writer1 = spawn_writer(couch_db:name(Db1)), + Writer2 = spawn_writer(couch_db:name(Db2)), ?assert(is_process_alive(Writer1)), ?assert(is_process_alive(Writer2)), @@ -361,16 +361,16 @@ couchdb_1283() -> %% Below we do exactly the same as couch_mrview:compact holds inside %% because we need have access to compaction Pid, not a Ref. - %% {ok, MonRef} = couch_mrview:compact(MDb1#db.name, <<"_design/foo">>, + %% {ok, MonRef} = couch_mrview:compact(MDb1, <<"_design/foo">>, %% [monitor]), {ok, Pid} = couch_index_server:get_index( - couch_mrview_index, MDb1#db.name, <<"_design/foo">>), + couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>), {ok, CPid} = gen_server:call(Pid, compact), %% By suspending compaction process we ensure that compaction won't get %% finished too early to make get_writer_status assertion fail. 
erlang:suspend_process(CPid), MonRef = erlang:monitor(process, CPid), - Writer3 = spawn_writer(Db3#db.name), + Writer3 = spawn_writer(couch_db:name(Db3)), ?assert(is_process_alive(Writer3)), ?assert(is_process_alive(Writer1)), @@ -526,7 +526,8 @@ view_cleanup(DbName) -> count_users(DbName) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {monitored_by, Monitors} = erlang:process_info(Db#db.main_pid, monitored_by), + DbPid = couch_db:get_pid(Db), + {monitored_by, Monitors} = erlang:process_info(DbPid, monitored_by), CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined], ok = couch_db:close(Db), length(lists:usort(Monitors) -- [self() | CouchFiles]). @@ -552,7 +553,8 @@ restore_backup_db_file(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), ok = couch_db:close(Db), - exit(Db#db.main_pid, shutdown), + DbPid = couch_db:get_pid(Db), + exit(DbPid, shutdown), DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]), ok = file:delete(DbFile), @@ -573,7 +575,8 @@ wait_db_compact_done(_DbName, 0) -> wait_db_compact_done(DbName, N) -> {ok, Db} = couch_db:open_int(DbName, []), ok = couch_db:close(Db), - case is_pid(Db#db.compactor_pid) of + CompactorPid = couch_db:get_compactor_pid(Db), + case is_pid(CompactorPid) of false -> ok; true -> diff --git a/src/couch_index/src/couch_index_server.erl b/src/couch_index/src/couch_index_server.erl index 4e86f5e80..cbdfbe746 100644 --- a/src/couch_index/src/couch_index_server.erl +++ b/src/couch_index/src/couch_index_server.erl @@ -60,14 +60,13 @@ validate(DbName, DDoc) -> lists:foreach(ValidateFun, EnabledIndexers). -get_index(Module, #db{name = <<"shards/", _/binary>> = DbName}, DDoc) -> - case is_record(DDoc, doc) of - true -> get_index(Module, DbName, DDoc, nil); - false -> get_index(Module, DbName, DDoc) - end; +get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) + when is_binary(DbName), is_record(DDoc, doc) -> + get_index(Module, DbName, DDoc, nil); get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) -> {Pid, Ref} = spawn_monitor(fun() -> - exit(fabric:open_doc(mem3:dbname(DbName), DDoc, [ejson_body, ?ADMIN_CTX])) + OpenOpts = [ejson_body, ?ADMIN_CTX], + exit(fabric:open_doc(mem3:dbname(DbName), DDoc, OpenOpts)) end), receive {'DOWN', Ref, process, Pid, {ok, Doc}} -> get_index(Module, DbName, Doc, nil); @@ -77,9 +76,10 @@ get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) -> erlang:demonitor(Ref, [flush]), {error, timeout} end; - -get_index(Module, DbName, DDoc) -> - get_index(Module, DbName, DDoc, nil). +get_index(Module, DbName, DDoc) when is_binary(DbName) -> + get_index(Module, DbName, DDoc, nil); +get_index(Module, Db, DDoc) -> + get_index(Module, couch_db:name(Db), DDoc). get_index(Module, DbName, DDoc, Fun) when is_binary(DbName) -> diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl index 5694641ca..dcb33b5b0 100644 --- a/src/couch_index/src/couch_index_util.erl +++ b/src/couch_index/src/couch_index_util.erl @@ -25,7 +25,7 @@ root_dir() -> index_dir(Module, DbName) when is_binary(DbName) -> DbDir = "." ++ binary_to_list(DbName) ++ "_design", filename:join([root_dir(), DbDir, Module]); -index_dir(Module, #db{}=Db) -> +index_dir(Module, Db) -> index_dir(Module, couch_db:name(Db)). 
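
couch_index_server:get_index/3 above gains a clause that normalizes an
open handle to its name via couch_db:name/1, so index callers no longer
reach into #db.name. A sketch under that assumption (my_index_mod and
the design doc id are placeholders; both calls should resolve to the
same index process, which is keyed by database name and signature):

    {ok, Db} = couch_db:open_int(<<"mydb">>, []),
    DDocId = <<"_design/myddoc">>,
    {ok, Pid} = couch_index_server:get_index(my_index_mod, Db, DDocId),
    {ok, Pid} = couch_index_server:get_index(my_index_mod, <<"mydb">>, DDocId).
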
diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/couch_index_compaction_tests.erl index 0787151ae..18dd9ffe5 100644 --- a/src/couch_index/test/couch_index_compaction_tests.erl +++ b/src/couch_index/test/couch_index_compaction_tests.erl @@ -23,7 +23,8 @@ setup() -> ?assertNot(is_opened(Db)), {Db, IndexerPid}. -fake_index(#db{name = DbName} = Db) -> +fake_index(Db) -> + DbName = couch_db:name(Db), ok = meck:new([test_index], [non_strict]), ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}), ok = meck:expect(test_index, open, fun(_Db, State) -> diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl index 088327c45..0f207fb5c 100644 --- a/src/couch_mrview/src/couch_mrview.erl +++ b/src/couch_mrview/src/couch_mrview.erl @@ -360,15 +360,12 @@ get_view_info(Db, DDoc, VName) -> %% @doc refresh a view index -refresh(#db{name=DbName}, DDoc) -> - refresh(DbName, DDoc); - -refresh(Db, DDoc) -> - UpdateSeq = couch_util:with_db(Db, fun(WDb) -> +refresh(DbName, DDoc) when is_binary(DbName)-> + UpdateSeq = couch_util:with_db(DbName, fun(WDb) -> couch_db:get_update_seq(WDb) end), - case couch_index_server:get_index(couch_mrview_index, Db, DDoc) of + case couch_index_server:get_index(couch_mrview_index, DbName, DDoc) of {ok, Pid} -> case catch couch_index:get_state(Pid, UpdateSeq) of {ok, _} -> ok; @@ -376,7 +373,10 @@ refresh(Db, DDoc) -> end; Error -> {error, Error} - end. + end; + +refresh(Db, DDoc) -> + refresh(couch_db:name(Db), DDoc). compact(Db, DDoc) -> compact(Db, DDoc, []). diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl index 9ef79b664..3c30d2fdb 100644 --- a/src/couch_mrview/src/couch_mrview_compactor.erl +++ b/src/couch_mrview/src/couch_mrview_compactor.erl @@ -52,10 +52,7 @@ compact(State) -> CompactFName = couch_mrview_util:compaction_file(DbName, Sig), {ok, Fd} = couch_mrview_util:open_file(CompactFName), ESt = couch_mrview_util:reset_index(Db, Fd, State), - - {ok, DbReduce} = couch_btree:full_reduce(Db#db.id_tree), - Count = element(1, DbReduce), - + {ok, Count} = couch_db:get_doc_count(Db), {ESt, Count} end), @@ -290,7 +287,7 @@ swap_compacted(OldState, NewState) -> unlink(OldState#mrst.fd), erlang:demonitor(OldState#mrst.fd_monitor, [flush]), - + {ok, NewState#mrst{fd_monitor=Ref}}. diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl index 7e3fd78e3..e5638fe83 100644 --- a/src/couch_mrview/src/couch_mrview_http.erl +++ b/src/couch_mrview/src/couch_mrview_http.erl @@ -103,11 +103,11 @@ handle_view_changes_req(#httpd{path_parts=[_,<<"_design">>,DDocName,<<"_view_cha handle_view_req(#httpd{method='GET', path_parts=[_, _, DDocName, _, VName, <<"_info">>]}=Req, Db, _DDoc) -> - + DbName = couch_db:name(Db), DDocId = <<"_design/", DDocName/binary >>, - {ok, Info} = couch_mrview:get_view_info(Db#db.name, DDocId, VName), + {ok, Info} = couch_mrview:get_view_info(DbName, DDocId, VName), - FinalInfo = [{db_name, Db#db.name}, + FinalInfo = [{db_name, DbName}, {ddoc, DDocId}, {view, VName}] ++ Info, chttpd:send_json(Req, 200, {FinalInfo}); @@ -212,7 +212,7 @@ is_restricted(Db, _) -> couch_db:is_system_db(Db). 
is_public_fields_configured(Db) -> - DbName = ?b2l(Db#db.name), + DbName = ?b2l(couch_db:name(Db)), case config:get("couch_httpd_auth", "authentication_db", "_users") of DbName -> UsersDbPublic = config:get("couch_httpd_auth", "users_db_public", "false"), @@ -237,7 +237,7 @@ do_all_docs_req(Req, Db, Keys, NS) -> {ok, Resp} = couch_httpd:etag_maybe(Req, fun() -> Max = chttpd:chunked_response_buffer_size(), VAcc0 = #vacc{db=Db, req=Req, threshold=Max}, - DbName = ?b2l(Db#db.name), + DbName = ?b2l(couch_db:name(Db)), UsersDbName = config:get("couch_httpd_auth", "authentication_db", "_users"), diff --git a/src/couch_mrview/src/couch_mrview_show.erl b/src/couch_mrview/src/couch_mrview_show.erl index 1ebc85b3e..60e8a2c46 100644 --- a/src/couch_mrview/src/couch_mrview_show.erl +++ b/src/couch_mrview/src/couch_mrview_show.erl @@ -364,13 +364,17 @@ json_apply_field({Key, NewValue}, [], Acc) -> % This loads the db info if we have a fully loaded db record, but we might not % have the db locally on this node, so then load the info through fabric. -json_req_obj(Req, #db{main_pid=Pid}=Db) when is_pid(Pid) -> - chttpd_external:json_req_obj(Req, Db); json_req_obj(Req, Db) -> - % use a separate process because we're already in a receive loop, and - % json_req_obj calls fabric:get_db_info() - spawn_monitor(fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end), - receive {'DOWN', _, _, _, JsonReq} -> JsonReq end. + case couch_db:is_clustered(Db) of + true -> + % use a separate process because we're already in a receive loop, + % and json_req_obj calls fabric:get_db_info() + JRO = fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end, + spawn_monitor(JRO), + receive {'DOWN', _, _, _, JsonReq} -> JsonReq end; + false -> + chttpd_external:json_req_obj(Req, Db) + end. last_chunk(Req, undefined) -> chttpd:send_response(Req, 200, [], <<"">>); diff --git a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl index 5e352797f..bf8eb7e5b 100644 --- a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl +++ b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl @@ -25,7 +25,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl b/src/couch_mrview/test/couch_mrview_changes_since_tests.erl index 8b11e3dd0..7e2f321fe 100644 --- a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl +++ b/src/couch_mrview/test/couch_mrview_changes_since_tests.erl @@ -25,7 +25,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_mrview/test/couch_mrview_collation_tests.erl b/src/couch_mrview/test/couch_mrview_collation_tests.erl index c4a714d1e..5c8cb54b1 100644 --- a/src/couch_mrview/test/couch_mrview_collation_tests.erl +++ b/src/couch_mrview/test/couch_mrview_collation_tests.erl @@ -64,7 +64,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. 
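
json_req_obj/2 in couch_mrview_show above keeps its spawn_monitor
wrapper for the clustered case because the caller already sits in a
receive loop and json_req_obj calls fabric:get_db_info/1. The same
isolate-and-wait idiom as a generic sketch (call_isolated/1 is
hypothetical):

    %% Run Fun in a throwaway process and harvest its result from the
    %% 'DOWN' message, keeping the caller's mailbox clean.
    call_isolated(Fun) ->
        {_Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
        receive {'DOWN', Ref, process, _, Result} -> Result end.
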
diff --git a/src/couch_mrview/test/couch_mrview_compact_tests.erl b/src/couch_mrview/test/couch_mrview_compact_tests.erl index 079639f5d..7cd5de817 100644 --- a/src/couch_mrview/test/couch_mrview_compact_tests.erl +++ b/src/couch_mrview/test/couch_mrview_compact_tests.erl @@ -24,7 +24,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl index 028e0be11..5ac3e7ecf 100644 --- a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl +++ b/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl @@ -21,7 +21,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. ddoc_validation_test_() -> diff --git a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl b/src/couch_mrview/test/couch_mrview_index_changes_tests.erl index 8f0c296aa..2701e0c22 100644 --- a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl +++ b/src/couch_mrview/test/couch_mrview_index_changes_tests.erl @@ -22,7 +22,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. changes_index_test() -> diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl index 3f88972ea..c994df9d3 100644 --- a/src/couch_mrview/test/couch_mrview_index_info_tests.erl +++ b/src/couch_mrview/test/couch_mrview_index_info_tests.erl @@ -28,7 +28,7 @@ setup() -> teardown({Db, _}) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_mrview/test/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/couch_mrview_map_views_tests.erl index 3a199288d..229af183d 100644 --- a/src/couch_mrview/test/couch_mrview_map_views_tests.erl +++ b/src/couch_mrview/test/couch_mrview_map_views_tests.erl @@ -24,7 +24,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_mrview/test/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/couch_mrview_red_views_tests.erl index 310078597..b83686113 100644 --- a/src/couch_mrview/test/couch_mrview_red_views_tests.erl +++ b/src/couch_mrview/test/couch_mrview_red_views_tests.erl @@ -24,7 +24,7 @@ setup() -> teardown(Db) -> couch_db:close(Db), - couch_server:delete(Db#db.name, [?ADMIN_CTX]), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), ok. diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl index 7f0c7eecb..1ce2bae88 100644 --- a/src/couch_replicator/src/couch_replicator.erl +++ b/src/couch_replicator/src/couch_replicator.erl @@ -477,15 +477,21 @@ handle_call({report_seq_done, Seq, StatsInc}, From, {noreply, NewState}. 
-handle_cast({db_compacted, DbName}, - #rep_state{source = #db{name = DbName} = Source} = State) -> - {ok, NewSource} = couch_db:reopen(Source), - {noreply, State#rep_state{source = NewSource}}; - -handle_cast({db_compacted, DbName}, - #rep_state{target = #db{name = DbName} = Target} = State) -> - {ok, NewTarget} = couch_db:reopen(Target), - {noreply, State#rep_state{target = NewTarget}}; +handle_cast({db_compacted, DbName}, State) -> + #rep_state{ + source = Source, + target = Target + } = State, + SourceName = couch_replicator_utils:local_db_name(Source), + TargetName = couch_replicator_utils:local_db_name(Target), + case DbName of + SourceName -> + {ok, NewSource} = couch_db:reopen(Source), + {noreply, State#rep_state{source = NewSource}}; + TargetName -> + {ok, NewTarget} = couch_db:reopen(Target), + {noreply, State#rep_state{target = NewTarget}} + end; handle_cast(checkpoint, State) -> #rep_state{rep_details = #rep{} = Rep} = State, @@ -968,10 +974,11 @@ has_session_id(SessionId, [{Props} | Rest]) -> end. -db_monitor(#db{} = Db) -> - couch_db:monitor(Db); -db_monitor(_HttpDb) -> - nil. +db_monitor(#httpdb{}) -> + nil; +db_monitor(Db) -> + couch_db:monitor(Db). + get_pending_count(St) -> Rep = St#rep_state.rep_details, diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl index e5f625335..09f22cbae 100644 --- a/src/couch_replicator/src/couch_replicator_api_wrap.erl +++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl @@ -59,11 +59,11 @@ db_uri(#httpdb{url = Url}) -> couch_util:url_strip_password(Url); -db_uri(#db{name = Name}) -> - db_uri(Name); +db_uri(DbName) when is_binary(DbName) -> + ?b2l(DbName); -db_uri(DbName) -> - ?b2l(DbName). +db_uri(Db) -> + db_uri(couch_db:name(Db)). db_open(Db, Options) -> @@ -148,10 +148,12 @@ get_db_info(#httpdb{} = Db) -> fun(200, _, {Props}) -> {ok, Props} end); -get_db_info(#db{name = DbName, user_ctx = UserCtx}) -> - {ok, Db} = couch_db:open(DbName, [{user_ctx, UserCtx}]), - {ok, Info} = couch_db:get_db_info(Db), - couch_db:close(Db), +get_db_info(Db) -> + DbName = couch_db:name(Db), + UserCtx = couch_db:get_user_ctx(Db), + {ok, InfoDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]), + {ok, Info} = couch_db:get_db_info(InfoDb), + couch_db:close(InfoDb), {ok, [{couch_util:to_binary(K), V} || {K, V} <- Info]}. @@ -171,8 +173,10 @@ get_pending_count(#httpdb{} = Db, Seq) -> send_req(Db, Options, fun(200, _, {Props}) -> {ok, couch_util:get_value(<<"pending">>, Props, null)} end); -get_pending_count(#db{name=DbName}=Db, Seq) when is_number(Seq) -> - {ok, CountDb} = couch_db:open(DbName, [{user_ctx, Db#db.user_ctx}]), +get_pending_count(Db, Seq) when is_number(Seq) -> + DbName = couch_db:name(Db), + UserCtx = couch_db:get_user_ctx(Db), + {ok, CountDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]), Pending = couch_db:count_changes_since(CountDb, Seq), couch_db:close(CountDb), {ok, Pending}. @@ -184,7 +188,8 @@ get_view_info(#httpdb{} = Db, DDocId, ViewName) -> {VInfo} = couch_util:get_value(<<"view_index">>, Props, {[]}), {ok, VInfo} end); -get_view_info(#db{name = DbName}, DDocId, ViewName) -> +get_view_info(Db, DDocId, ViewName) -> + DbName = couch_db:name(Db), {ok, VInfo} = couch_mrview:get_view_info(DbName, DDocId, ViewName), {ok, [{couch_util:to_binary(K), V} || {K, V} <- VInfo]}. 
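
With #db{} no longer matchable outside couch_db, the replicator modules
above invert their clause order: match the public #httpdb{} record
first and treat any other argument as an opaque local handle. The
idiom, sketched with a hypothetical db_label/1:

    %% Remote endpoints are still a public record; everything else is
    %% assumed to be a couch_db handle.
    db_label(#httpdb{url = Url}) ->
        couch_util:url_strip_password(Url);
    db_label(Db) ->
        binary_to_list(couch_db:name(Db)).
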

diff --git a/src/couch_replicator/src/couch_replicator_manager.erl b/src/couch_replicator/src/couch_replicator_manager.erl
index 85dd428f2..48bf7a648 100644
--- a/src/couch_replicator/src/couch_replicator_manager.erl
+++ b/src/couch_replicator/src/couch_replicator_manager.erl
@@ -879,8 +879,8 @@ state_after_error(#rep_state{retries_left = Left, wait = Wait} = State) ->
 before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
     Doc;
-before_doc_update(#doc{body = {Body}} = Doc, #db{user_ctx=UserCtx} = Db) ->
-    #user_ctx{roles = Roles, name = Name} = UserCtx,
+before_doc_update(#doc{body = {Body}} = Doc, Db) ->
+    #user_ctx{roles = Roles, name = Name} = couch_db:get_user_ctx(Db),
     case lists:member(<<"_replicator">>, Roles) of
     true ->
         Doc;
@@ -906,8 +906,8 @@ after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
     Doc;
-after_doc_read(#doc{body = {Body}} = Doc, #db{user_ctx=UserCtx} = Db) ->
-    #user_ctx{name = Name} = UserCtx,
+after_doc_read(#doc{body = {Body}} = Doc, Db) ->
+    #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
     case (catch couch_db:check_is_admin(Db)) of
     ok ->
         Doc;
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index e96d52a41..17d33342b 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -14,6 +14,7 @@
 
 -export([parse_rep_doc/2]).
 -export([open_db/1, close_db/1]).
+-export([local_db_name/1]).
 -export([start_db_compaction_notifier/2, stop_db_compaction_notifier/1]).
 -export([replication_id/2]).
 -export([sum_stats/2, is_deleted/1]).
@@ -410,26 +411,33 @@ ssl_verify_options(false, _OTPVersion) ->
 
 %% New db record has Options field removed here to enable smoother dbcore migration
-open_db(#db{name = Name, user_ctx = UserCtx}) ->
-    {ok, Db} = couch_db:open(Name, [{user_ctx, UserCtx} | []]),
-    Db;
-open_db(HttpDb) ->
-    HttpDb.
-
-
-close_db(#db{} = Db) ->
-    couch_db:close(Db);
-close_db(_HttpDb) ->
-    ok.
-
+open_db(#httpdb{} = HttpDb) ->
+    HttpDb;
+open_db(Db) ->
+    DbName = couch_db:name(Db),
+    UserCtx = couch_db:get_user_ctx(Db),
+    {ok, NewDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
+    NewDb.
+
+close_db(#httpdb{}) ->
+    ok;
+close_db(Db) ->
+    couch_db:close(Db).
 
-start_db_compaction_notifier(#db{name = DbName}, Server) ->
+start_db_compaction_notifier(#httpdb{}, _) ->
+    nil;
+start_db_compaction_notifier(Db, Server) ->
+    DbName = couch_db:name(Db),
     {ok, Pid} = couch_event:link_listener(
             ?MODULE, handle_db_event, Server, [{dbname, DbName}]
         ),
-    Pid;
-start_db_compaction_notifier(_, _) ->
-    nil.
+    Pid.
+
+
+local_db_name(#httpdb{}) ->
+    undefined;
+local_db_name(Db) ->
+    couch_db:name(Db).
stop_db_compaction_notifier(nil) -> diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl index ee0c45558..1a4e599f4 100644 --- a/src/couch_replicator/src/couch_replicator_worker.erl +++ b/src/couch_replicator/src/couch_replicator_worker.erl @@ -67,16 +67,16 @@ -start_link(Cp, #db{} = Source, Target, ChangesManager, _MaxConns) -> +start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) -> + gen_server:start_link( + ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []); + +start_link(Cp, Source, Target, ChangesManager, _MaxConns) -> Pid = spawn_link(fun() -> erlang:put(last_stats_report, now()), queue_fetch_loop(Source, Target, Cp, Cp, ChangesManager) end), - {ok, Pid}; - -start_link(Cp, Source, Target, ChangesManager, MaxConns) -> - gen_server:start_link( - ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []). + {ok, Pid}. init({Cp, Source, Target, ChangesManager, MaxConns}) -> @@ -139,15 +139,23 @@ handle_call(flush, {Pid, _} = From, {noreply, State2#state{flush_waiter = From}}. -handle_cast({db_compacted, DbName}, - #state{source = #db{name = DbName} = Source} = State) -> - {ok, NewSource} = couch_db:reopen(Source), - {noreply, State#state{source = NewSource}}; - -handle_cast({db_compacted, DbName}, - #state{target = #db{name = DbName} = Target} = State) -> - {ok, NewTarget} = couch_db:reopen(Target), - {noreply, State#state{target = NewTarget}}; +handle_cast({db_compacted, DbName} = Msg, #state{} = State) -> + #state{ + source = Source, + target = Target + } = State, + SourceName = couch_replicator_utils:local_db_name(Source), + TargetName = couch_replicator_utils:local_db_name(Target), + case DbName of + SourceName -> + {ok, NewSource} = couch_db:reopen(Source), + {noreply, State#state{source = NewSource}}; + TargetName -> + {ok, NewTarget} = couch_db:reopen(Target), + {noreply, State#state{target = NewTarget}}; + _Else -> + {stop, {unexpected_async_call, Msg}, State} + end; handle_cast(Msg, State) -> {stop, {unexpected_async_call, Msg}, State}. 
@@ -220,15 +228,15 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) -> Target2 = open_db(Target), {IdRevs, Stats0} = find_missing(Changes, Target2), case Source of - #db{} -> - Source2 = open_db(Source), - Stats = local_process_batch( - IdRevs, Cp, Source2, Target2, #batch{}, Stats0), - close_db(Source2); #httpdb{} -> ok = gen_server:call(Parent, {add_stats, Stats0}, infinity), remote_process_batch(IdRevs, Parent), - {ok, Stats} = gen_server:call(Parent, flush, infinity) + {ok, Stats} = gen_server:call(Parent, flush, infinity); + _Db -> + Source2 = open_db(Source), + Stats = local_process_batch( + IdRevs, Cp, Source2, Target2, #batch{}, Stats0), + close_db(Source2) end, close_db(Target2), ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity), @@ -245,7 +253,7 @@ local_process_batch([], Cp, Source, Target, #batch{docs = Docs, size = Size}, St case Target of #httpdb{} -> couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]); - #db{} -> + _Db -> couch_log:debug("Worker flushing doc batch of ~p docs", [Size]) end, Stats2 = flush_docs(Target, Docs), @@ -360,7 +368,7 @@ spawn_writer(Target, #batch{docs = DocList, size = Size}) -> case {Target, Size > 0} of {#httpdb{}, true} -> couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]); - {#db{}, true} -> + {_Db, true} -> couch_log:debug("Worker flushing doc batch of ~p docs", [Size]); _ -> ok @@ -422,7 +430,7 @@ maybe_flush_docs(#httpdb{} = Target, Batch, Doc) -> end end; -maybe_flush_docs(#db{} = Target, #batch{docs = DocAcc, size = SizeAcc}, Doc) -> +maybe_flush_docs(Target, #batch{docs = DocAcc, size = SizeAcc}, Doc) -> case SizeAcc + 1 of SizeAcc2 when SizeAcc2 >= ?DOC_BUFFER_LEN -> couch_log:debug("Worker flushing doc batch of ~p docs", [SizeAcc2]), diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl index 7a5a25ab4..5731ff41b 100644 --- a/src/couch_replicator/test/couch_replicator_compact_tests.erl +++ b/src/couch_replicator/test/couch_replicator_compact_tests.erl @@ -82,8 +82,8 @@ should_all_processes_be_alive(RepPid, Source, Target) -> {ok, SourceDb} = reopen_db(Source), {ok, TargetDb} = reopen_db(Target), ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(SourceDb#db.main_pid)), - ?assert(is_process_alive(TargetDb#db.main_pid)) + ?assert(is_process_alive(couch_db:get_pid(SourceDb))), + ?assert(is_process_alive(couch_db:get_pid(TargetDb))) end). 
should_run_replication(RepPid, RepId, Source, Target) -> @@ -149,12 +149,12 @@ should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) -> compact_db("source", SourceDb), ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(SourceDb#db.main_pid)), + ?assert(is_process_alive(couch_db:get_pid(SourceDb))), wait_for_compaction("source", SourceDb), compact_db("target", TargetDb), ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(TargetDb#db.main_pid)), + ?assert(is_process_alive(couch_db:get_pid(TargetDb))), wait_for_compaction("target", TargetDb), {ok, SourceDb2} = reopen_db(SourceDb), @@ -165,14 +165,14 @@ should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) -> compact_db("source", SourceDb2), ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(SourceDb2#db.main_pid)), + ?assert(is_process_alive(couch_db:get_pid(SourceDb2))), pause_writer(Writer), wait_for_compaction("source", SourceDb2), resume_writer(Writer), compact_db("target", TargetDb2), ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(TargetDb2#db.main_pid)), + ?assert(is_process_alive(couch_db:get_pid(TargetDb2))), pause_writer(Writer), wait_for_compaction("target", TargetDb2), resume_writer(Writer) @@ -248,14 +248,16 @@ should_compare_databases(Source, Target) -> reopen_db({remote, Db}) -> reopen_db(Db); -reopen_db(#db{name=DbName}) -> - reopen_db(DbName); -reopen_db(DbName) -> +reopen_db(DbName) when is_binary(DbName) -> {ok, Db} = couch_db:open_int(DbName, []), ok = couch_db:close(Db), - {ok, Db}. + {ok, Db}; +reopen_db(Db) -> + reopen_db(couch_db:name(Db)). -compact_db(Type, #db{name = Name}) -> + +compact_db(Type, Db0) -> + Name = couch_db:name(Db0), {ok, Db} = couch_db:open_int(Name, []), {ok, CompactPid} = couch_db:start_compact(Db), MonRef = erlang:monitor(process, CompactPid), @@ -395,7 +397,8 @@ stop_writer(Pid) -> {reason, "Timeout stopping source database writer"}]}) end. -writer_loop(#db{name = DbName}, Parent, Counter) -> +writer_loop(Db0, Parent, Counter) -> + DbName = couch_db:name(Db0), {ok, Data} = file:read_file(?ATTFILE), maybe_pause(Parent, Counter), Doc = couch_doc:from_json_obj({[ diff --git a/src/fabric/include/couch_db_tmp.hrl b/src/fabric/include/couch_db_tmp.hrl deleted file mode 100644 index cd3a047d4..000000000 --- a/src/fabric/include/couch_db_tmp.hrl +++ /dev/null @@ -1,296 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --define(LOCAL_DOC_PREFIX, "_local/"). --define(DESIGN_DOC_PREFIX0, "_design"). --define(DESIGN_DOC_PREFIX, "_design/"). - --define(MIN_STR, <<"">>). --define(MAX_STR, <<255>>). % illegal utf string - --define(JSON_ENCODE(V), couch_util:json_encode(V)). --define(JSON_DECODE(V), couch_util:json_decode(V)). - --define(b2l(V), binary_to_list(V)). --define(l2b(V), list_to_binary(V)). - --define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>). - --define(LOG_DEBUG(Format, Args), couch_log:debug(Format, Args)). --define(LOG_INFO(Format, Args), couch_log:notice(Format, Args)). 
--define(LOG_ERROR(Format, Args), couch_log:error(Format, Args)).
-
--record(rev_info,
-    {
-    rev,
-    seq = 0,
-    deleted = false,
-    body_sp = nil % stream pointer
-    }).
-
--record(doc_info,
-    {
-    id = <<"">>,
-    high_seq = 0,
-    revs = [] % rev_info
-    }).
-
--record(full_doc_info,
-    {id = <<"">>,
-    update_seq = 0,
-    deleted = false,
-    data_size = 0,
-    rev_tree = []
-    }).
-
--record(httpd,
-    {mochi_req,
-    peer,
-    method,
-    path_parts,
-    db_url_handlers,
-    user_ctx,
-    req_body = undefined,
-    design_url_handlers,
-    auth,
-    default_fun,
-    url_handlers
-    }).
-
-
--record(doc,
-    {
-    id = <<"">>,
-    revs = {0, []},
-
-    % the json body object.
-    body = {[]},
-
-    atts = [], % attachments
-
-    deleted = false,
-
-    % key/value tuple of meta information, provided when using special options:
-    % couch_db:open_doc(Db, Id, Options).
-    meta = []
-    }).
-
-
--record(att,
-    {
-    name,
-    type,
-    att_len,
-    disk_len, % length of the attachment in its identity form
-              % (that is, without a content encoding applied to it)
-              % differs from att_len when encoding /= identity
-    md5= <<>>,
-    revpos=0,
-    data,
-    encoding=identity % currently supported values are:
-                      %     identity, gzip
-                      % additional values to support in the future:
-                      %     deflate, compress
-    }).
-
-
--record(user_ctx,
-    {
-    name=null,
-    roles=[],
-    handler
-    }).
-
-% This should be updated anytime a header change happens that requires more
-% than filling in new defaults.
-%
-% As long the changes are limited to new header fields (with inline
-% defaults) added to the end of the record, then there is no need to increment
-% the disk revision number.
-%
-% if the disk revision is incremented, then new upgrade logic will need to be
-% added to couch_db_updater:init_db.
-
--define(LATEST_DISK_VERSION, 5).
-
--record(db_header,
-    {disk_version = ?LATEST_DISK_VERSION,
-     update_seq = 0,
-     unused = 0,
-     id_tree_state = nil,
-     seq_tree_state = nil,
-     local_tree_state = nil,
-     purge_seq = 0,
-     purged_docs = nil,
-     security_ptr = nil,
-     revs_limit = 1000
-    }).
-
--record(db,
-    {main_pid = nil,
-    update_pid = nil,
-    compactor_pid = nil,
-    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
-    fd,
-    fd_monitor,
-    header = #db_header{},
-    committed_update_seq,
-    id_tree,
-    seq_tree,
-    local_tree,
-    update_seq,
-    name,
-    filepath,
-    validate_doc_funs = undefined,
-    security = [],
-    security_ptr = nil,
-    user_ctx = #user_ctx{},
-    waiting_delayed_commit = nil,
-    revs_limit = 1000,
-    fsync_options = [],
-    is_sys_db = false
-    }).
-
-
--record(view_query_args, {
-    start_key,
-    end_key,
-    start_docid = ?MIN_STR,
-    end_docid = ?MAX_STR,
-
-    direction = fwd,
-    inclusive_end=true, % aka a closed-interval
-
-    limit = 10000000000, % Huge number to simplify logic
-    skip = 0,
-
-    group_level = 0,
-
-    view_type = nil,
-    include_docs = false,
-    stale = false,
-    multi_get = false,
-    callback = nil,
-    list = nil,
-    keys = nil,
-    sorted = true,
-    extra = []
-}).
-
--record(view_fold_helper_funs, {
-    reduce_count,
-    passed_end,
-    start_response,
-    send_row
-}).
-
--record(reduce_fold_helper_funs, {
-    start_response,
-    send_row
-}).
-
--record(extern_resp_args, {
-    code = 200,
-    stop = false,
-    data = <<>>,
-    ctype = "application/json",
-    headers = [],
-    json = nil
-}).
-
--record(group, {
-    sig=nil,
-    dbname,
-    fd=nil,
-    name,
-    def_lang,
-    design_options=[],
-    views,
-    id_btree=nil,
-    current_seq=0,
-    purge_seq=0,
-    query_server=nil,
-    waiting_delayed_commit=nil,
-    atts=[]
-    }).
-
--record(view,
-    {id_num,
-    map_names=[],
-    def,
-    btree=nil,
-    reduce_funs=[],
-    dbcopies=[],
-    options=[]
-    }).
-
--record(index_header,
-    {seq=0,
-    purge_seq=0,
-    id_btree_state=nil,
-    view_states=nil
-    }).
-
--record(http_db, {
-    url,
-    auth = [],
-    resource = "",
-    headers = [
-        {"User-Agent", "CouchDB/"++couch:version()},
-        {"Accept", "application/json"},
-        {"Accept-Encoding", "gzip"}
-    ],
-    qs = [],
-    method = get,
-    body = nil,
-    options = [
-        {response_format,binary},
-        {inactivity_timeout, 30000}
-    ],
-    retries = 10,
-    pause = 500,
-    conn = nil
-}).
-
-% small value used in revision trees to indicate the revision isn't stored
--define(REV_MISSING, []).
-
--record(changes_args, {
-    feed = "normal",
-    dir = fwd,
-    since = "0",
-    limit = 1000000000000000,
-    style = main_only,
-    heartbeat,
-    timeout,
-    filter,
-    include_docs = false
-}).
-
--record(proc, {
-    pid,
-    lang,
-    client = nil,
-    ddoc_keys = [],
-    prompt_fun,
-    set_timeout_fun,
-    stop_fun,
-    data_fun
-}).
-
--record(leaf, {
-    deleted,
-    ptr,
-    seq,
-    size = 0,
-    atts = []
-}).
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index f98a5c04a..2b87e461e 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -38,7 +38,7 @@
 
 -include_lib("fabric/include/fabric.hrl").
 
--type dbname() :: (iodata() | #db{}).
+-type dbname() :: (iodata() | tuple()).
 -type docid() :: iodata().
 -type revision() :: {integer(), binary()}.
 -type callback() :: fun((any(), any()) -> {ok | stop, any()}).
@@ -476,10 +476,12 @@ dbname(DbName) when is_list(DbName) ->
     list_to_binary(DbName);
 dbname(DbName) when is_binary(DbName) ->
     DbName;
-dbname(#db{name=Name}) ->
-    Name;
-dbname(DbName) ->
-    erlang:error({illegal_database_name, DbName}).
+dbname(Db) ->
+    try
+        couch_db:name(Db)
+    catch error:badarg ->
+        erlang:error({illegal_database_name, Db})
+    end.
 
 name(Thing) ->
     couch_util:to_binary(Thing).
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 80b110a24..6ba59f206 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -38,7 +38,8 @@
 }).
 
 %% rpc endpoints
-%% call to with_db will supply your M:F with a #db{} and then remaining args
+%% call to with_db will supply your M:F with a Db instance
+%% and then remaining args
 
 %% @equiv changes(DbName, Args, StartSeq, [])
 changes(DbName, Args, StartSeq) ->
@@ -76,13 +77,13 @@ changes(DbName, Options, StartVector, DbOptions) ->
             args = Args,
             options = Options,
             pending = couch_db:count_changes_since(Db, StartSeq),
-            epochs = get_epochs(Db)
+            epochs = couch_db:get_epochs(Db)
         },
         try
            {ok, #cacc{seq=LastSeq, pending=Pending, epochs=Epochs}} =
               couch_db:changes_since(Db, StartSeq, Enum, Opts, Acc0),
            rexi:stream_last({complete, [
-               {seq, {LastSeq, uuid(Db), owner_of(LastSeq, Epochs)}},
+               {seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
                {pending, Pending}
            ]})
         after
@@ -225,7 +226,7 @@ get_missing_revs(DbName, IdRevsList, Options) ->
             not_found ->
                 {Id, Revs, []}
             end
-        end, IdRevsList, couch_btree:lookup(Db#db.id_tree, Ids))};
+        end, IdRevsList, couch_db:get_full_doc_infos(Db, Ids))};
     Error ->
         Error
     end).
@@ -249,8 +250,9 @@ group_info(DbName, DDocId, DbOptions) ->
 
 reset_validation_funs(DbName) ->
     case get_or_create_db(DbName, []) of
-        {ok, #db{main_pid = Pid}} ->
-            gen_server:cast(Pid, {load_validation_funs, undefined});
+        {ok, Db} ->
+            DbPid = couch_db:get_pid(Db),
+            gen_server:cast(DbPid, {load_validation_funs, undefined});
         _ ->
             ok
     end.
@@ -358,7 +360,7 @@ changes_enumerator(DocInfo, Acc) ->
     Opts = if Conflicts -> [conflicts | DocOptions]; true -> DocOptions end,
     ChangesRow = {change, [
         {pending, Pending-1},
-        {seq, {Seq, uuid(Db), owner_of(Seq, Epochs)}},
+        {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}},
         {id, Id},
         {changes, Results},
         {deleted, Del} |
@@ -456,78 +458,17 @@ set_io_priority(DbName, Options) ->
             ok
     end.
 
-calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
-    Seq;
-calculate_start_seq(Db, Node, {Seq, Uuid}) ->
-    % Treat the current node as the epoch node
-    calculate_start_seq(Db, Node, {Seq, Uuid, Node});
-calculate_start_seq(Db, _Node, {Seq, Uuid, EpochNode}) ->
-    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
-        true ->
-            case is_owner(EpochNode, Seq, couch_db:get_epochs(Db)) of
-                true -> Seq;
-                false -> 0
-            end;
-        false ->
-            %% The file was rebuilt, most likely in a different
-            %% order, so rewind.
-            0
-    end;
-calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
-    case is_prefix(Uuid, couch_db:get_uuid(Db)) of
-        true ->
-            start_seq(get_epochs(Db), OriginalNode, Seq);
-        false ->
+calculate_start_seq(Db, Node, Seq) ->
+    case couch_db:calculate_start_seq(Db, Node, Seq) of
+        N when is_integer(N) ->
+            N;
+        {replace, OriginalNode, Uuid, OriginalSeq} ->
            %% Scan history looking for an entry with
            %%  * target_node == TargetNode
            %%  * target_uuid == TargetUUID
            %%  * target_seq  =< TargetSeq
            %% If such an entry is found, stream from associated source_seq
-            mem3_rep:find_source_seq(Db, OriginalNode, Uuid, Seq)
-    end.
-
-is_prefix(Pattern, Subject) ->
-    binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
-is_owner(Node, Seq, Epochs) ->
-    validate_epochs(Epochs),
-    Node =:= owner_of(Seq, Epochs).
-
-owner_of(_Seq, []) ->
-    undefined;
-owner_of(Seq, [{EpochNode, EpochSeq} | _Rest]) when Seq > EpochSeq ->
-    EpochNode;
-owner_of(Seq, [_ | Rest]) ->
-    owner_of(Seq, Rest).
-
-get_epochs(Db) ->
-    Epochs = couch_db:get_epochs(Db),
-    validate_epochs(Epochs),
-    Epochs.
-
-start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
-    %% OrigNode is the owner of the Seq so we can safely stream from there
-    Seq;
-start_seq([{_, NewSeq}, {OrigNode, _} | _], OrigNode, Seq) when Seq > NewSeq ->
-    %% We transferred this file before Seq was written on OrigNode, so we need
-    %% to stream from the beginning of the next epoch. Note that it is _not_
-    %% necessary for the current node to own the epoch beginning at NewSeq
-    NewSeq;
-start_seq([_ | Rest], OrigNode, Seq) ->
-    start_seq(Rest, OrigNode, Seq);
-start_seq([], OrigNode, Seq) ->
-    erlang:error({epoch_mismatch, OrigNode, Seq}).
-
-validate_epochs(Epochs) ->
-    %% Assert uniqueness.
-    case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
-        true -> ok;
-        false -> erlang:error(duplicate_epoch)
-    end,
-    %% Assert order.
-    case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
-        true -> ok;
-        false -> erlang:error(epoch_order)
+            mem3_rep:find_source_seq(Db, OriginalNode, Uuid, OriginalSeq)
     end.
 
 uuid(Db) ->
@@ -540,30 +481,6 @@ uuid_prefix_len() ->
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
-calculate_start_seq_test() ->
-    %% uuid mismatch is always a rewind.
-    Hdr1 = couch_db_header:new(),
-    Hdr2 = couch_db_header:set(Hdr1, [{epochs, [{node1, 1}]}, {uuid, <<"uuid1">>}]),
-    ?assertEqual(0, calculate_start_seq(#db{header=Hdr2}, node1, {1, <<"uuid2">>})),
-    %% uuid matches and seq is owned by node.
-    Hdr3 = couch_db_header:set(Hdr2, [{epochs, [{node1, 1}]}]),
-    ?assertEqual(2, calculate_start_seq(#db{header=Hdr3}, node1, {2, <<"uuid1">>})),
-    %% uuids match but seq is not owned by node.
-    Hdr4 = couch_db_header:set(Hdr2, [{epochs, [{node2, 2}, {node1, 1}]}]),
-    ?assertEqual(0, calculate_start_seq(#db{header=Hdr4}, node1, {3, <<"uuid1">>})),
-    %% return integer if we didn't get a vector.
-    ?assertEqual(4, calculate_start_seq(#db{}, foo, 4)).
-
-is_owner_test() ->
-    ?assertNot(is_owner(foo, 1, [])),
-    ?assertNot(is_owner(foo, 1, [{foo, 1}])),
-    ?assert(is_owner(foo, 2, [{foo, 1}])),
-    ?assert(is_owner(foo, 50, [{bar, 100}, {foo, 1}])),
-    ?assert(is_owner(foo, 50, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assert(is_owner(bar, 150, [{baz, 200}, {bar, 100}, {foo, 1}])),
-    ?assertError(duplicate_epoch, is_owner(foo, 1, [{foo, 1}, {bar, 1}])),
-    ?assertError(epoch_order, is_owner(foo, 1, [{foo, 100}, {bar, 200}])).
-
 maybe_filtered_json_doc_no_filter_test() ->
     Body = {[{<<"a">>, 1}]},
     Doc = #doc{id = <<"1">>, revs = {1, [<<"r1">>]}, body = Body},
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index 7e3f23e68..c5aef0377 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -302,7 +302,8 @@ path_ends_with(Path, Suffix) ->
 fake_db(DbName, Opts) ->
     {SecProps} = fabric:get_security(DbName), % as admin
     UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
-    #db{name = DbName, security = SecProps, user_ctx = UserCtx}.
+    {ok, Db} = couch_db:clustered_db(DbName, UserCtx, SecProps),
+    Db.
 
 %% test function
 kv(Item, Count) ->
diff --git a/src/mango/src/mango_crud.erl b/src/mango/src/mango_crud.erl
index 68c9d6cc4..41a4d143d 100644
--- a/src/mango/src/mango_crud.erl
+++ b/src/mango/src/mango_crud.erl
@@ -111,7 +111,7 @@ maybe_add_user_ctx(Db, Opts) ->
         {user_ctx, _} ->
             Opts;
         false ->
-            [{user_ctx, Db#db.user_ctx} | Opts]
+            [{user_ctx, couch_db:get_user_ctx(Db)} | Opts]
     end.
 
 
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
index 96e365a49..dfe942c38 100644
--- a/src/mango/src/mango_cursor_text.erl
+++ b/src/mango/src/mango_cursor_text.erl
@@ -50,7 +50,7 @@ create(Db, Indexes, Selector, Opts0) ->
             ?MANGO_ERROR(multiple_text_indexes)
     end,
 
-    Opts = unpack_bookmark(Db#db.name, Opts0),
+    Opts = unpack_bookmark(couch_db:name(Db), Opts0),
 
     DreyfusLimit = get_dreyfus_limit(),
     Limit = erlang:min(DreyfusLimit, couch_util:get_value(limit, Opts, mango_opts:default_limit())),
@@ -96,7 +96,7 @@ execute(Cursor, UserFun, UserAcc) ->
             },
             CAcc = #cacc{
                 selector = Selector,
-                dbname = Db#db.name,
+                dbname = couch_db:name(Db),
                 ddocid = ddocid(Idx),
                 idx_name = mango_idx:name(Idx),
                 bookmark = get_bookmark(Opts),
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index a08827649..cc6cbd5d8 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -190,7 +190,8 @@ handle_find_req(Req, _Db) ->
 
 
 set_user_ctx(#httpd{user_ctx=Ctx}, Db) ->
-    Db#db{user_ctx=Ctx}.
+    {ok, NewDb} = couch_db:set_user_ctx(Db, Ctx),
+    NewDb.
 
 
 get_idx_w_opts(Opts) ->
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index bc88b970c..1c3924aaa 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -290,12 +290,12 @@ idx_mod(#idx{type = <<"text">>}) ->
     end.
 
 
-db_to_name(#db{name=Name}) ->
-    Name;
 db_to_name(Name) when is_binary(Name) ->
     Name;
 db_to_name(Name) when is_list(Name) ->
-    iolist_to_binary(Name).
+    iolist_to_binary(Name);
+db_to_name(Db) ->
+    couch_db:name(Db).
 
 
 get_idx_def(Opts) ->
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index ad9d2e8d7..f6120a829 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -344,8 +344,9 @@ indexable_fields(Fields, {op_default, _}) ->
     [<<"$default">> | Fields].
 
 
-maybe_reject_index_all_req({Def}, #db{name=DbName, user_ctx=Ctx}) ->
-    User = Ctx#user_ctx.name,
+maybe_reject_index_all_req({Def}, Db) ->
+    DbName = couch_db:name(Db),
+    #user_ctx{name = User} = couch_db:get_user_ctx(Db),
     Fields = couch_util:get_value(fields, Def),
     case {Fields, forbid_index_all()} of
         {all_fields, "true"} ->
@@ -374,7 +375,9 @@ setup() ->
         end),
     %default index all def that generates {fields, all_fields}
     Index = #idx{def={[]}},
-    Db = #db{name = <<"testdb">>, user_ctx=#user_ctx{name = <<"u1">>}},
+    DbName = <<"testdb">>,
+    UserCtx = #user_ctx{name = <<"u1">>},
+    {ok, Db} = couch_db:clustered_db(DbName, UserCtx),
     {Index, Db}.
 
 
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 405d7e5fa..e9c1473bc 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -145,13 +145,13 @@ get_shard(DbName, Node, Range) ->
 local_shards(DbName) ->
     mem3_shards:local(DbName).
 
-shard_suffix(#db{name=DbName}) ->
-    shard_suffix(DbName);
-shard_suffix(DbName0) ->
+shard_suffix(DbName0) when is_binary(DbName0) ->
     Shard = hd(shards(DbName0)),
     <<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>> =
         Shard#shard.name,
-    filename:extension(binary_to_list(DbName)).
+    filename:extension(binary_to_list(DbName));
+shard_suffix(Db) ->
+    shard_suffix(couch_db:name(Db)).
 
 fold_shards(Fun, Acc) ->
     mem3_shards:fold(Fun, Acc).
@@ -292,10 +292,11 @@ group_by_range(Shards) ->
 
 % quorum functions
 
-quorum(#db{name=DbName}) ->
-    quorum(DbName);
-quorum(DbName) ->
-    n(DbName) div 2 + 1.
+quorum(DbName) when is_binary(DbName) ->
+    n(DbName) div 2 + 1;
+quorum(Db) ->
+    quorum(couch_db:name(Db)).
+
 
 node(#shard{node=Node}) ->
     Node;
diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl
index 535815862..571f06370 100644
--- a/src/mem3/src/mem3_httpd.erl
+++ b/src/mem3/src/mem3_httpd.erl
@@ -32,7 +32,7 @@ handle_membership_req(#httpd{path_parts=[<<"_membership">>]}=Req) ->
 
 handle_shards_req(#httpd{method='GET',
         path_parts=[_DbName, <<"_shards">>]} = Req, Db) ->
-    DbName = mem3:dbname(Db#db.name),
+    DbName = mem3:dbname(couch_db:name(Db)),
     Shards = mem3:shards(DbName),
     JsonShards = json_shards(Shards, dict:new()),
     couch_httpd:send_json(Req, {[
@@ -40,7 +40,7 @@ handle_shards_req(#httpd{method='GET',
     ]});
 handle_shards_req(#httpd{method='GET',
         path_parts=[_DbName, <<"_shards">>, DocId]} = Req, Db) ->
-    DbName = mem3:dbname(Db#db.name),
+    DbName = mem3:dbname(couch_db:name(Db)),
     Shards = mem3:shards(DbName, DocId),
     {[{Shard, Dbs}]} = json_shards(Shards, dict:new()),
     couch_httpd:send_json(Req, {[
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
index f31891a7b..555389b90 100644
--- a/src/mem3/src/mem3_nodes.erl
+++ b/src/mem3/src/mem3_nodes.erl
@@ -92,7 +92,7 @@ code_change(_OldVsn, #state{}=State, _Extra) ->
 initialize_nodelist() ->
     DbName = config:get("mem3", "nodes_db", "_nodes"),
     {ok, Db} = mem3_util:ensure_exists(DbName),
-    {ok, _, Db} = couch_btree:fold(Db#db.id_tree, fun first_fold/3, Db, []),
+    {ok, _} = couch_db:fold_docs(Db, fun first_fold/2, Db, []),
     % add self if not already present
     case ets:lookup(?MODULE, node()) of
         [_] ->
@@ -103,13 +103,13 @@ initialize_nodelist() ->
         {ok, _} = couch_db:update_doc(Db, Doc, [])
     end,
     couch_db:close(Db),
-    Db#db.update_seq.
+    couch_db:get_update_seq(Db).
 
-first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, _, Acc) ->
+first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
     {ok, Acc};
-first_fold(#full_doc_info{deleted=true}, _, Acc) ->
+first_fold(#full_doc_info{deleted=true}, Acc) ->
     {ok, Acc};
-first_fold(#full_doc_info{id=Id}=DocInfo, _, Db) ->
+first_fold(#full_doc_info{id=Id}=DocInfo, Db) ->
     {ok, #doc{body={Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
     ets:insert(?MODULE, {mem3_util:to_atom(Id), Props}),
     {ok, Db}.
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index ad7ac55f5..85d46e2cd 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -170,11 +170,11 @@ find_source_seq_int(#doc{body={Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
     end.
 
 
-repl(#db{name=DbName, seq_tree=Bt}=Db, Acc0) ->
-    erlang:put(io_priority, {internal_repl, DbName}),
+repl(Db, Acc0) ->
+    erlang:put(io_priority, {internal_repl, couch_db:name(Db)}),
     #acc{seq=Seq} = Acc1 = calculate_start_seq(Acc0#acc{source = Db}),
     Fun = fun ?MODULE:changes_enumerator/3,
-    {ok, _, Acc2} = couch_btree:fold(Bt, Fun, Acc1, [{start_key, Seq + 1}]),
+    {ok, _, Acc2} = couch_db:enum_docs_since(Db, Seq, Fun, Acc1, []),
     {ok, #acc{seq = LastSeq}} = replicate_batch(Acc2),
     {ok, couch_db:count_changes_since(Db, LastSeq)}.
 
@@ -354,10 +354,10 @@ find_repl_doc(SrcDb, TgtUUIDPrefix) ->
         end
     end,
     Options = [{start_key, DocIdPrefix}],
-    case couch_btree:fold(SrcDb#db.local_tree, FoldFun, not_found, Options) of
-        {ok, _, {TgtUUID, Doc}} ->
+    case couch_db:fold_local_docs(SrcDb, FoldFun, not_found, Options) of
+        {ok, {TgtUUID, Doc}} ->
             {ok, TgtUUID, Doc};
-        {ok, _, not_found} ->
+        {ok, not_found} ->
             {not_found, missing};
         Else ->
             couch_log:error("Error finding replication doc: ~w", [Else]),
diff --git a/src/mem3/src/mem3_rpc.erl b/src/mem3/src/mem3_rpc.erl
index 93cb99ac9..c2bd58fdf 100644
--- a/src/mem3/src/mem3_rpc.erl
+++ b/src/mem3/src/mem3_rpc.erl
@@ -84,11 +84,11 @@ load_checkpoint_rpc(DbName, SourceNode, SourceUUID) ->
 save_checkpoint_rpc(DbName, Id, SourceSeq, NewEntry0, History0) ->
     erlang:put(io_priority, {internal_repl, DbName}),
     case get_or_create_db(DbName, [?ADMIN_CTX]) of
-        {ok, #db{update_seq = TargetSeq} = Db} ->
+        {ok, Db} ->
             NewEntry = {[
                 {<<"target_node">>, atom_to_binary(node(), utf8)},
                 {<<"target_uuid">>, couch_db:get_uuid(Db)},
-                {<<"target_seq">>, TargetSeq}
+                {<<"target_seq">>, couch_db:get_update_seq(Db)}
             ] ++ NewEntry0},
             Body = {[
                 {<<"seq">>, SourceSeq},
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index c7f33c61f..f99855285 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -267,7 +267,7 @@ get_update_seq() ->
     DbName = config:get("mem3", "shards_db", "_dbs"),
     {ok, Db} = mem3_util:ensure_exists(DbName),
     couch_db:close(Db),
-    Db#db.update_seq.
+    couch_db:get_update_seq(Db).
 
 listen_for_changes(Since) ->
     DbName = config:get("mem3", "shards_db", "_dbs"),
@@ -317,7 +317,7 @@ load_shards_from_disk(DbName) when is_binary(DbName) ->
         couch_db:close(Db)
     end.
 
-load_shards_from_db(#db{} = ShardDb, DbName) ->
+load_shards_from_db(ShardDb, DbName) ->
     case couch_db:open_doc(ShardDb, DbName, [ejson_body]) of
         {ok, #doc{body = {Props}}} ->
             Shards = mem3_util:build_ordered_shards(DbName, Props),
-- 
cgit v1.2.1
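For reviewers, a minimal sketch of the accessor-based couch_db API that this
patch switches callers to. The module name and the shape of the example are
made up for illustration (they are not part of the commit); the couch_db
calls themselves (open_int/2, name/1, get_pid/1, get_update_seq/1, close/1)
are the ones used throughout the diff above:

    %% Hypothetical example, not part of the commit: code that used to
    %% pattern match on #db.name, #db.main_pid and #db.update_seq now
    %% has to go through the couch_db accessor functions instead.
    -module(db_accessor_example).
    -export([describe/1]).

    describe(DbName) when is_binary(DbName) ->
        {ok, Db} = couch_db:open_int(DbName, []),
        try
            %% Formerly Db#db.name, Db#db.main_pid, Db#db.update_seq.
            Name = couch_db:name(Db),
            MainPid = couch_db:get_pid(Db),
            UpdateSeq = couch_db:get_update_seq(Db),
            {Name, MainPid, UpdateSeq}
        after
            ok = couch_db:close(Db)
        end.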