summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul J. Davis <paul.joseph.davis@gmail.com>2017-07-06 16:37:23 -0500
committerPaul J. Davis <paul.joseph.davis@gmail.com>2017-07-19 11:11:42 -0500
commit7cd7a8db0de2ae7d7128a91ddfa2445182200eaf (patch)
tree26b08959422b226b0f8c607650d14a020f67b085
parent2ebb1552a8567feb70630072626ec4928f9784cf (diff)
downloadcouchdb-7cd7a8db0de2ae7d7128a91ddfa2445182200eaf.tar.gz
FIXUP: Don't manually track cache size
Not quite sure why I started doing this in the first place. I think it was because I was worried that ets:info/2 might be expensive, but I can get away with only calling it once here, so it's no big deal any more.
-rw-r--r--src/ddoc_cache/src/ddoc_cache_lru.erl14
-rw-r--r--src/ddoc_cache/test/ddoc_cache_coverage_test.erl6
-rw-r--r--src/ddoc_cache/test/ddoc_cache_lru_test.erl38
3 files changed, 45 insertions, 13 deletions
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 35173f4b5..27649592d 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -42,7 +42,6 @@
-record(st, {
pids, % pid -> key
dbs, % dbname -> docid -> key -> pid
- size,
evictor
}).
@@ -93,7 +92,6 @@ init(_) ->
{ok, #st{
pids = Pids,
dbs = Dbs,
- size = 0,
evictor = Evictor
}}.
@@ -109,20 +107,20 @@ terminate(_Reason, St) ->
handle_call({start, Key, Default}, _From, St) ->
#st{
pids = Pids,
- dbs = Dbs,
- size = CurSize
+ dbs = Dbs
} = St,
case ets:lookup(?CACHE, Key) of
[] ->
MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+ CurSize = ets:info(?CACHE, size),
case trim(St, CurSize, max(0, MaxSize)) of
- {ok, N} ->
+ ok ->
true = ets:insert_new(?CACHE, #entry{key = Key}),
{ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
ok = khash:put(Pids, Pid, Key),
store_key(Dbs, Key, Pid),
- {reply, {ok, Pid}, St#st{size = CurSize - N + 1}};
+ {reply, {ok, Pid}, St};
full ->
?EVENT(full, Key),
{reply, full, St}
@@ -251,7 +249,7 @@ trim(_, _, 0) ->
full;
trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
- {ok, 0};
+ ok;
trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
case ets:first(?LRU) of
@@ -259,7 +257,7 @@ trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
full;
{_Ts, Key, Pid} ->
remove_entry(St, Key, Pid),
- {ok, 1}
+ trim(St, CurSize - 1, MaxSize)
end.
diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
index 395f56048..91182caac 100644
--- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -33,7 +33,7 @@ coverage_test_() ->
restart_lru() ->
send_bad_messages(ddoc_cache_lru),
- ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c, d})),
+ ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
@@ -47,7 +47,7 @@ restart_evictor() ->
meck:new(ddoc_cache_ev, [passthrough]),
try
State = sys:get_state(ddoc_cache_lru),
- Evictor = element(5, State),
+ Evictor = element(4, State),
Ref = erlang:monitor(process, Evictor),
exit(Evictor, shutdown),
receive
@@ -57,7 +57,7 @@ restart_evictor() ->
end,
meck:wait(ddoc_cache_ev, event, [evictor_died, '_'], 1000),
NewState = sys:get_state(ddoc_cache_lru),
- NewEvictor = element(5, NewState),
+ NewEvictor = element(4, NewState),
?assertNotEqual(Evictor, NewEvictor)
after
meck:unload()
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index 77b39cd59..dd7782872 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -63,7 +63,8 @@ check_lru_test_() ->
fun check_multi_start/1,
fun check_multi_open/1,
fun check_capped_size/1,
- fun check_full_cache/1
+ fun check_full_cache/1,
+ fun check_cache_refill/1
]}
}.
@@ -88,7 +89,7 @@ check_multi_start(_) ->
?assert(is_process_alive(Opener)),
Opener ! go,
receive {'DOWN', OpenerRef, _, _, _} -> ok end,
- lists:foreach(fun({CPid, Ref}) ->
+ lists:foreach(fun({_, Ref}) ->
receive
{'DOWN', Ref, _, _, normal} -> ok
end
@@ -159,3 +160,36 @@ check_full_cache(_) ->
meck:wait(I - 5, ddoc_cache_ev, event, [full, '_'], 1000),
?assertEqual(5, ets:info(?CACHE, size))
end, lists:seq(6, 20)).
+
+
+check_cache_refill({DbName, _}) ->
+ ddoc_cache_tutil:clear(),
+ meck:reset(ddoc_cache_ev),
+
+ InitDDoc = fun(I) ->
+ NumBin = list_to_binary(integer_to_list(I)),
+ DDocId = <<"_design/", NumBin/binary>>,
+ Doc = #doc{id = DDocId, body = {[]}},
+ {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
+ {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
+ {ddoc_cache_entry_ddocid, {DbName, DDocId}}
+ end,
+
+ lists:foreach(fun(I) ->
+ Key = InitDDoc(I),
+ couch_log:error("STARTED? ~p", [Key]),
+ meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+ ?assert(ets:info(?CACHE, size) > 0)
+ end, lists:seq(1, 5)),
+
+ ShardName = element(2, hd(mem3:shards(DbName))),
+ {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+ meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+ meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
+ ?assertEqual(0, ets:info(?CACHE, size)),
+
+ lists:foreach(fun(I) ->
+ Key = InitDDoc(I),
+ meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+ ?assert(ets:info(?CACHE, size) > 0)
+ end, lists:seq(6, 10)).