author    Garren Smith <garren.smith@gmail.com>  2018-08-07 12:15:14 +0200
committer Robert Newson <rnewson@apache.org>     2018-08-08 11:53:00 +0100
commit    6df0eeef23e8aaef97eff19ab42f674afbed33fa (patch)
tree      edcf40ddeab7f709ed10edd2268ad313b48a44f5
parent    23683ab079a3a7f01d2f94d93c30ab5726f4f5e1 (diff)
download  couchdb-user-partitioned-dbs-4.tar.gz

mem3 tests for partitioned databases (user-partitioned-dbs-4)

 -rw-r--r-- src/mem3/src/mem3.erl        | 22
 -rw-r--r-- src/mem3/src/mem3_shards.erl | 68
 -rw-r--r-- src/mem3/src/mem3_util.erl   | 59
 3 files changed, 147 insertions(+), 2 deletions(-)
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 99c0863c6..aecca2fdd 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -389,4 +389,26 @@ allowed_nodes_test_() ->
        ]
    }]}.
+is_partitioned_false_shards_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_dbs">>), false),
+    meck:unload().
+
+is_partitioned_false_nodes_db_test() ->
+    meck:expect(config, get, fun (_, _, Default) -> Default end),
+    ?assertEqual(is_partitioned(<<"_nodes">>), false),
+    meck:unload().
+
+is_partitioned_true_partitioned_db_test() ->
+    Shard = #shard{
+        opts = [{partitioned, true}]
+    },
+    ?assertEqual(is_partitioned([Shard]), true).
+
+is_partitioned_false_partitioned_db_test() ->
+    Shard = #shard{
+        opts = []
+    },
+    ?assertEqual(is_partitioned([Shard]), false).
+
-endif.
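
The four tests above pin down both halves of mem3:is_partitioned/1: the shards and nodes system databases are never partitioned, and a shard list counts as partitioned exactly when its opts say so. A minimal sketch of the clauses this implies (an illustration only, not the shipped implementation; the config lookups are why the first two tests mock config:get/3):

is_partitioned(DbName) when is_binary(DbName) ->
    %% Assumption: the system-db names come from config, with the usual
    %% "_dbs"/"_nodes" defaults (hence the mocked config:get/3 above).
    ShardsDb = list_to_binary(config:get("mem3", "shards_db", "_dbs")),
    NodesDb = list_to_binary(config:get("mem3", "nodes_db", "_nodes")),
    case lists:member(DbName, [ShardsDb, NodesDb]) of
        true -> false;  %% system databases are never partitioned
        false -> is_partitioned(mem3:shards(DbName))
    end;
is_partitioned([#shard{opts = Opts} | _]) ->
    couch_util:get_value(partitioned, Opts, false).
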
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 1cd7be683..cfbc32ee4 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -559,7 +559,9 @@ mem3_shards_test_() ->
        t_writer_does_not_delete_other_writers_for_same_shard(),
        t_spawn_writer_in_load_shards_from_db(),
        t_cache_insert_takes_new_update(),
-       t_cache_insert_ignores_stale_update_and_kills_worker()
+       t_cache_insert_ignores_stale_update_and_kills_worker(),
+       t_load_shards_from_disk_returns_correct_shard_for_partition(),
+       t_for_docid_returns_correct_shard_for_partition()
    ]
}.
@@ -703,6 +705,70 @@ t_cache_insert_ignores_stale_update_and_kills_worker() ->
        ?assertEqual([], ets:tab2list(?OPENERS))
    end).
+t_load_shards_from_disk_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648, 2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned, true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+        Doc = #doc{body = {[]}},
+        meck:expect(couch_db, open_doc, 3, {ok, Doc}),
+        meck:expect(couch_db, get_update_seq, 1, 1),
+        meck:expect(mem3_util, build_ordered_shards, 2, Shards),
+        meck:expect(mem3_util, ensure_exists, 1, {ok, <<"shard-name">>}),
+        meck:expect(couch_db, close, 1, ok),
+
+        [Shard] = load_shards_from_disk(DbName, DocId),
+
+        meck:validate(couch_db),
+        meck:validate(mem3_util),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+    end).
+
+t_for_docid_returns_correct_shard_for_partition() ->
+    ?_test(begin
+        Shards = [
+            #ordered_shard{
+                name = <<"shards/60000000-7fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [1610612736, 2147483647],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned, true}]
+            },
+            #ordered_shard{
+                name = <<"shards/80000000-9fffffff/db1.1533630706">>,
+                node = 'node1@127.0.0.1',
+                dbname = <<"db1">>,
+                range = [2147483648, 2684354559],
+                ref = undefined,
+                order = 1,
+                opts = [{partitioned, true}]
+            }
+        ],
+        DbName = <<"db1">>,
+        DocId = <<"foo:123">>,
+
+        true = ets:insert(?SHARDS, Shards),
+
+        [Shard] = for_docid(DbName, DocId, [ordered]),
+
+        ShardName = Shard#ordered_shard.name,
+        ?assertEqual(ShardName, <<"shards/80000000-9fffffff/db1.1533630706">>)
+    end).
+
mock_state(UpdateSeq) ->
    #st{
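
Both new tests route DocId <<"foo:123">> through a partitioned shard map and expect the shards/80000000-9fffffff shard back: with {partitioned, true} in the shard opts, only the partition key (<<"foo">>, everything before the first colon) is hashed, and erlang:crc32(<<"foo">>) falls inside the [16#80000000, 16#9FFFFFFF] range. A hedged sketch of the range selection being exercised (shard_for_hash is a hypothetical helper name, not part of the mem3_shards API; the real lookup happens inside for_docid/3 against the ?SHARDS ets table):

%% Hypothetical helper for illustration; not part of the mem3_shards API.
shard_for_hash(Hash, Shards) ->
    [S || #ordered_shard{range = [B, E]} = S <- Shards, B =< Hash, Hash =< E].

%% Against the fixtures above, docid_hash(<<"foo:123">>, [{partitioned, true}])
%% hashes only <<"foo">>, so the filter keeps the second #ordered_shard{} and
%% drops the 60000000-7fffffff shard from the result.
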
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 9620e9827..7b8dd1619 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -35,7 +35,6 @@ hash(Item) when is_binary(Item) ->
hash(Item) ->
    erlang:crc32(term_to_binary(Item)).
-
docid_hash(DocId) when is_binary(DocId) ->
    docid_hash(DocId, []).
@@ -294,3 +293,61 @@ downcast(#ordered_shard{}=S) ->
    };
downcast(Shards) when is_list(Shards) ->
    [downcast(Shard) || Shard <- Shards].
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+docid_hash_design_doc_test() ->
+    Id = <<"_design/ddoc">>,
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, erlang:crc32(Id)).
+
+docid_hash_doc_partition_false_test() ->
+    Id = <<"partitionkey:docid">>,
+    IdHash = erlang:crc32(Id),
+    Hash = docid_hash(Id),
+    ?assertEqual(Hash, IdHash),
+    Hash = docid_hash(Id, []),
+    ?assertEqual(Hash, IdHash).
+
+docid_hash_doc_partition_true_test() ->
+    Id = <<"partitionkey:doc:id">>,
+    Hash = docid_hash(Id, [{partitioned, true}]),
+    ?assertEqual(Hash, erlang:crc32(<<"partitionkey">>)).
+
+
+add_shards_by_node_adds_partition_prop_test() ->
+    DocProp = [
+        {<<"_id">>, <<"database-name">>},
+        {<<"_rev">>, <<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+        {<<"shard_suffix">>, ".1533550200"},
+        {<<"changelog">>, [[<<"add">>, <<"00000000-1fffffff">>, <<"node1@127.0.0.1">>]]},
+        {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>, <<"20000000-3fffffff">>]}]}},
+        {<<"by_range">>, {[{<<"00000000-1fffffff">>, [<<"node1@127.0.0.1">>]}]}},
+        {<<"options">>, {[{partitioned, true}]}}
+    ],
+
+    [ShardRange | _] = build_shards_by_node(<<"database-name">>, DocProp),
+    Opts = ShardRange#shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+
+add_shards_by_range_adds_partition_prop_test() ->
+    DocProp = [
+        {<<"_id">>, <<"database-name">>},
+        {<<"_rev">>, <<"1-fb8e28457a6e0c49de1848b5e4a28238">>},
+        {<<"shard_suffix">>, ".1533550200"},
+        {<<"changelog">>, [[<<"add">>, <<"00000000-1fffffff">>, <<"node1@127.0.0.1">>]]},
+        {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-1fffffff">>, <<"20000000-3fffffff">>]}]}},
+        {<<"by_range">>, {[{<<"00000000-1fffffff">>, [<<"node1@127.0.0.1">>]}]}},
+        {<<"options">>, {[{partitioned, true}]}}
+    ],
+
+    [ShardRange | _] = build_shards_by_range(<<"database-name">>, DocProp),
+    Opts = ShardRange#ordered_shard.opts,
+    Partitioned = lists:keyfind(partitioned, 1, Opts),
+    ?assertEqual(Partitioned, {partitioned, true}).
+
+-endif.
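
Taken together, the three docid_hash tests fix the contract of mem3_util:docid_hash/2: design documents and unpartitioned documents hash the full id, while a partitioned document hashes only the text before the first colon. A sketch consistent with all three assertions (an illustration of the implied behaviour, not the actual source, which may differ in detail):

%% Sketch of the behaviour the tests above pin down; not the real source.
docid_hash(<<"_design/", _/binary>> = DocId, _Options) ->
    erlang:crc32(DocId);                %% design docs hash the full id
docid_hash(DocId, Options) ->
    case lists:keyfind(partitioned, 1, Options) of
        {partitioned, true} ->
            %% Split at the first colon and hash the partition key only.
            [Partition | _] = binary:split(DocId, <<":">>),
            erlang:crc32(Partition);
        _ ->
            erlang:crc32(DocId)         %% unpartitioned: hash the full id
    end.
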