diff options
author | Ronny Berndt <ronny@apache.org> | 2022-06-23 14:16:54 +0200 |
---|---|---|
committer | Ronny Berndt <ronny@apache.org> | 2022-06-23 15:17:46 +0200 |
commit | 0bfbb63e40ee9e95858dbde1d212ecb478fe0888 (patch) | |
tree | 0253025fc4c8e0ef022cbbb21ec7b33f37cb5c90 | |
parent | ab00be43702cfdd74e8a3c24d649aa1b6d06df8a (diff) | |
download | couchdb-0bfbb63e40ee9e95858dbde1d212ecb478fe0888.tar.gz |
Backport commits from fdbmain into main (old 3.x)
Cherry-picked commits from 0156a55012b76adb652c11032596d9801c71665e
Thanks @kianmeng
31 files changed, 50 insertions, 50 deletions
diff --git a/.github/ISSUE_TEMPLATE/rfc.md b/.github/ISSUE_TEMPLATE/rfc.md index 08bd0549e..a966bd9f8 100644 --- a/.github/ISSUE_TEMPLATE/rfc.md +++ b/.github/ISSUE_TEMPLATE/rfc.md @@ -62,7 +62,7 @@ document are to be interpreted as described in [NOTE]: # ( Headers and parameters accepted ) [NOTE]: # ( JSON in [if a PUT or POST type] ) [NOTE]: # ( JSON out ) -[NOTE]: # ( Valid status codes and their defintions ) +[NOTE]: # ( Valid status codes and their definitions ) [NOTE]: # ( A proposed Request and Response block ) ## HTTP API deprecations @@ -541,7 +541,7 @@ def degrade_cluster(ctx): ctx["procs"].append(haproxy_proc) -@log("Stoping proc {proc.pid}") +@log("Stopping proc {proc.pid}") def kill_process(proc): if proc and proc.returncode is None: proc.kill() @@ -28,7 +28,7 @@ defmodule Mix.Tasks.Suite do ``` """ use Mix.Task - @shortdoc "Outputs all availabe integration tests" + @shortdoc "Outputs all available integration tests" def run(_) do Path.wildcard(Path.join(Mix.Project.build_path(), "/**/ebin")) |> Enum.filter(&File.dir?/1) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 17d5a9378..cefd9e493 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -492,7 +492,7 @@ partitioned||* = true ; How much time to wait before retrying after a missing doc exception. This ; exception happens if the document was seen in the changes feed, but internal ; replication hasn't caught up yet, and fetching document's revisions -; fails. This a common scenario when source is updated while continous +; fails. This a common scenario when source is updated while continuous ; replication is running. The retry period would depend on how quickly internal ; replication is expected to catch up. In general this is an optimisation to ; avoid crashing the whole replication job, which would consume more resources @@ -593,7 +593,7 @@ partitioned||* = true ; The journald writer doesn't have any options. 
It still writes ; the logs to stderr, but without the timestamp prepended, since ; the journal will add it automatically, and with the log level -; formated as per +; formatted as per ; https://www.freedesktop.org/software/systemd/man/sd-daemon.html ; ; diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini index 398cf3e2c..4c847617c 100644 --- a/rel/overlay/etc/local.ini +++ b/rel/overlay/etc/local.ini @@ -78,7 +78,7 @@ ;tls_versions = [tlsv1, 'tlsv1.1', 'tlsv1.2'] ; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to -; the Virual Host will be redirected to the path. In the example below all requests +; the Virtual Host will be redirected to the path. In the example below all requests ; to http://example.com/ are redirected to /database. ; If you run CouchDB on a specific port, include the port number in the vhost: ; example.com:5984 = /database diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl index 1d7163799..93b610719 100644 --- a/src/chttpd/src/chttpd.erl +++ b/src/chttpd/src/chttpd.erl @@ -1110,7 +1110,7 @@ error_info({error, security_migration_updates_disabled}) -> "security migration." >>}; error_info(all_workers_died) -> - {503, <<"service unvailable">>, << + {503, <<"service unavailable">>, << "Nodes are unable to service this " "request due to overloading or maintenance mode." 
>>}; diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl index ea4e62f80..f767c82c5 100644 --- a/src/chttpd/src/chttpd_sup.erl +++ b/src/chttpd/src/chttpd_sup.erl @@ -106,7 +106,7 @@ append_if_set({_Key, 0}, Opts) -> Opts; append_if_set({Key, Value}, Opts) -> couch_log:error( - "The value for `~s` should be string convertable " + "The value for `~s` should be string convertible " "to integer which is >= 0 (got `~p`)", [Key, Value] ), diff --git a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl index 3eda08ae0..a88c95055 100644 --- a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl +++ b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl @@ -114,7 +114,7 @@ should_return_503_error_for_open_revs_post_form(Url) -> ErrorMessage = couch_util:get_value(<<"error">>, Json1), [ ?_assertEqual(503, Code), - ?_assertEqual(<<"service unvailable">>, ErrorMessage) + ?_assertEqual(<<"service unavailable">>, ErrorMessage) ]. mock_open_revs(RevsResp) -> diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl index 2b74ebadb..9dc5053e6 100644 --- a/src/couch/src/couch_debug.erl +++ b/src/couch/src/couch_debug.erl @@ -229,8 +229,8 @@ help(mapfold_tree) -> It calls a user provided callback for every node of the tree. `Fun(Key, Value, Pos, Acc) -> {NewValue, NewAcc}`. 
Where: - - Key of the node (usualy Pid of a process) - - Value of the node (usualy information collected by link_tree) + - Key of the node (usually Pid of a process) + - Value of the node (usually information collected by link_tree) - Pos - depth from the root of the tree - Acc - user's accumulator @@ -245,8 +245,8 @@ help(map_tree) -> It calls a user provided callback `Fun(Key, Value, Pos) -> NewValue` Where: - - Key of the node (usualy Pid of a process) - - Value of the node (usualy information collected by link_tree) + - Key of the node (usually Pid of a process) + - Value of the node (usually information collected by link_tree) - Pos - depth from the root of the tree --- @@ -258,8 +258,8 @@ help(fold_tree) -> about the tree. It calls a user provided callback `Fun(Key, Value, Pos) -> NewValue` Where: - - Key of the node (usualy Pid of a process) - - Value of the node (usualy information collected by link_tree) + - Key of the node (usually Pid of a process) + - Value of the node (usually information collected by link_tree) - Pos - depth from the root of the tree --- diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index b5ca8bd29..39faea418 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -1407,7 +1407,7 @@ http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) -> % Special handling for the 413 response. Make sure the socket is closed as % we don't know how much data was read before the error was thrown. Also - % drain all the data in the receive buffer to avoid connction being reset + % drain all the data in the receive buffer to avoid connection being reset % before the 413 response is parsed by the client. This is still racy, it % just increases the chances of 413 being detected correctly by the client % (rather than getting a brutal TCP reset). 
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index 24a0c15ed..47096e120 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -150,7 +150,7 @@ null_authentication_handler(Req) -> % % This handler allows creation of a userCtx object from a user authenticated remotly. % The client just pass specific headers to CouchDB and the handler create the userCtx. -% Headers name can be defined in local.ini. By thefault they are : +% Headers name can be defined in local.ini. By default they are : % % * X-Auth-CouchDB-UserName : contain the username, (x_auth_username in % couch_httpd_auth section) @@ -158,8 +158,8 @@ null_authentication_handler(Req) -> % comma (x_auth_roles in couch_httpd_auth section) % * X-Auth-CouchDB-Token : token to authenticate the authorization (x_auth_token % in couch_httpd_auth section). This token is an hmac-sha1 created from secret key -% and username. The secret key should be the same in the client and couchdb node. s -% ecret key is the secret key in couch_httpd_auth section of ini. This token is optional +% and username. The secret key should be the same in the client and couchdb node. +% Secret key is the secret key in couch_httpd_auth section of ini. This token is optional % if value of proxy_use_secret key in couch_httpd_auth section of ini isn't true. % proxy_authentication_handler(Req) -> diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl index 0bff6a36d..3b42c8fd1 100644 --- a/src/couch/src/couch_httpd_vhost.erl +++ b/src/couch/src/couch_httpd_vhost.erl @@ -49,7 +49,7 @@ %% example.com = /example %% *.example.com = /example %% -%% The first line will rewrite the rquest to display the content of the +%% The first line will rewrite the request to display the content of the %% example database. This rule works only if the Host header is %% 'example.com' and won't work for CNAMEs. 
Second rule on the other hand %% match all CNAMES to example db. So www.example.com or db.example.com diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl index 5bc37bb62..db04d250e 100644 --- a/src/couch/src/couch_key_tree.erl +++ b/src/couch/src/couch_key_tree.erl @@ -27,7 +27,7 @@ %% and C. We now have two key trees, A->B and A->C. When we go to replicate a %% second time, the key tree must combine these two trees which gives us %% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we -%% say that we have two leaves (B and C) that are not deleted. The presense of +%% say that we have two leaves (B and C) that are not deleted. The presence of %% the multiple leaves indicate conflict. To remove a conflict, one of the %% edits (B or C) can be deleted, which results in, A->(B|C->D) where D is an %% edit that is specially marked with the a deleted=true flag. @@ -110,7 +110,7 @@ merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes} = Tree, MergeAcc) -> % Its helpful to note that this whole moving into sub-branches is due % to how we store trees that have been stemmed. When a path is % stemmed so that the root node is lost, we wrap it in a tuple with - % the number keys that have been droped. This number is the depth + % the number keys that have been dropped. This number is the depth % value that's used throughout this module. case merge_at([Nodes], Depth - IDepth, [INodes]) of {[Merged], Result} -> diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl index 47e994300..d71fc059c 100644 --- a/src/couch/src/couch_util.erl +++ b/src/couch/src/couch_util.erl @@ -293,7 +293,7 @@ rand32() -> % given a pathname "../foo/bar/" it gives back the fully qualified % absolute pathname. abs_pathname(" " ++ Filename) -> - % strip leading whitspace + % strip leading whitespace abs_pathname(Filename); abs_pathname([$/ | _] = Filename) -> Filename; @@ -315,7 +315,7 @@ abs_pathname(Filename, Dir) -> OutFilename end. 
-% if this as an executable with arguments, seperate out the arguments +% if this as an executable with arguments, separate out the arguments % ""./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"} separate_cmd_args("", CmdAcc) -> {lists:reverse(CmdAcc), ""}; @@ -380,7 +380,7 @@ drop_dot_couch_ext(DbName) when is_binary(DbName) -> drop_dot_couch_ext(DbName) when is_list(DbName) -> binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))). -% takes a heirarchical list of dirs and removes the dots ".", double dots +% takes a hierarchical list of dirs and removes the dots ".", double dots % ".." and the corresponding parent dirs. fix_path_list([], Acc) -> lists:reverse(Acc); diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl index 1079678da..ea28d4040 100644 --- a/src/couch/test/eunit/couch_js_tests.erl +++ b/src/couch/test/eunit/couch_js_tests.erl @@ -71,7 +71,7 @@ should_roundtrip_utf8() -> ?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result). should_roundtrip_modified_utf8() -> - % Mimicing the test case from the mailing list + % Mimicking the test case from the mailing list Src = << "function(doc) {\n" " emit(doc.value.toLowerCase(), \"", diff --git a/src/couch_log/src/couch_log_trunc_io_fmt.erl b/src/couch_log/src/couch_log_trunc_io_fmt.erl index cf18019ad..40f3248c2 100644 --- a/src/couch_log/src/couch_log_trunc_io_fmt.erl +++ b/src/couch_log/src/couch_log_trunc_io_fmt.erl @@ -118,7 +118,7 @@ pad_char(Fmt, Args) -> {$\s, Fmt, Args}. %% collect_cc([FormatChar], [Argument]) -> %% {Control,[ControlArg],[FormatChar],[Arg]}. -%% Here we collect the argments for each control character. +%% Here we collect the arguments for each control character. %% Be explicit to cause failure early. 
collect_cc([$w | Fmt], [A | Args]) -> {$w, [A], Fmt, Args}; diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl index d516c2bc5..a4de74990 100644 --- a/src/couch_log/test/eunit/couch_log_formatter_test.erl +++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl @@ -811,7 +811,7 @@ coverage_test() -> lists:flatten(couch_log_formatter:format_trace(Trace)) ), - % Excercising print_silly_list + % Exercising print_silly_list ?assertMatch( #log_entry{ level = error, @@ -826,7 +826,7 @@ coverage_test() -> ) ), - % Excercising print_silly_list + % Exercising print_silly_list ?assertMatch( #log_entry{ level = error, diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl index ea2cdf737..255df6876 100644 --- a/src/couch_prometheus/src/couch_prometheus_util.erl +++ b/src/couch_prometheus/src/couch_prometheus_util.erl @@ -113,7 +113,7 @@ to_prom_summary(Path, Info) -> {n, Count} = lists:keyfind(n, 1, Value), Quantiles = lists:map( fun({Perc, Val0}) -> - % Prometheus uses seconds, so we need to covert milliseconds to seconds + % Prometheus uses seconds, so we need to convert milliseconds to seconds Val = Val0 / 1000, case Perc of 50 -> {[{quantile, <<"0.5">>}], Val}; diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl index e5c024f7e..712a771a4 100644 --- a/src/couch_replicator/src/couch_replicator_auth.erl +++ b/src/couch_replicator/src/couch_replicator_auth.erl @@ -31,7 +31,7 @@ % Note for plugin developers: consider using the "auth" field in the source and % target objects to store credentials. 
In that case non-owner and non-admin % users will have those credentials stripped when they read the replication -% document, which mimicks the behavior for "headers" and user and pass fields +% document, which mimics the behavior for "headers" and user and pass fields % in endpoint URLs". -callback initialize(#httpdb{}) -> diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl index d29600706..643738ca3 100644 --- a/src/couch_replicator/src/couch_replicator_auth_session.erl +++ b/src/couch_replicator/src/couch_replicator_auth_session.erl @@ -35,7 +35,7 @@ % % * If last request has an auth failure, check if request used a stale cookie % In this case nothing is done, and the client is told to retry. Next time -% it updates its headers befor the request it should pick up the latest +% it updates its headers before the request it should pick up the latest % cookie. % % * If last request failed and cookie was the latest known cookie, schedule a @@ -453,7 +453,7 @@ update_cookie(#state{epoch = Epoch} = State, Cookie, NowSec, MaxAge) -> next_refresh(NowSec, undefined, RefreshInterval) -> NowSec + RefreshInterval; next_refresh(NowSec, MaxAge, _) when is_integer(MaxAge) -> - % Apply a fudge factor to account for delays in receving the cookie + % Apply a fudge factor to account for delays in receiving the cookie % and / or time adjustments happening over a longer period of time NowSec + trunc(MaxAge * 0.9). 
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index bcab46747..325f3eb65 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -373,7 +373,7 @@ save_rep_doc(DbName, Doc) -> try couch_db:update_doc(Db, Doc, []) catch - % User can accidently write a VDU which prevents _replicator from + % User can accidentally write a VDU which prevents _replicator from % updating replication documents. Avoid crashing replicator and thus % preventing all other replication jobs on the node from running. throw:{forbidden, Reason} -> diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl index 67e3f8474..811c9dd1d 100644 --- a/src/couch_replicator/src/couch_replicator_httpc.erl +++ b/src/couch_replicator/src/couch_replicator_httpc.erl @@ -350,7 +350,7 @@ total_error_time_exceeded(#httpdb{first_error_timestamp = nil}) -> false; total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) -> HealthThresholdSec = couch_replicator_scheduler:health_threshold(), - % Theshold value is halved because in the calling code the next step + % Threshold value is halved because in the calling code the next step % is a doubling. Not halving here could mean sleeping too long and % exceeding the health threshold. ThresholdUSec = (HealthThresholdSec / 2) * 1000000, diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl index 3478b9830..a76c1dfff 100644 --- a/src/couch_replicator/src/json_stream_parse.erl +++ b/src/couch_replicator/src/json_stream_parse.erl @@ -25,7 +25,7 @@ % tuple is the data itself, and the second element is a function to be called % next to get the next chunk of data in the stream. % -% The EventFun is called everytime a json element is parsed. 
It must produce +% The EventFun is called every time a json element is parsed. It must produce % a new function to be called for the next event. % % Events happen each time a new element in the json string is parsed. diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index 5656ffc0b..9202ce071 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -181,7 +181,7 @@ composite_indexes(Indexes, FieldRanges) -> fun(Idx, Acc) -> Cols = mango_idx:columns(Idx), Prefix = composite_prefix(Cols, FieldRanges), - % Calcuate the difference between the FieldRanges/Selector + % Calculate the difference between the FieldRanges/Selector % and the Prefix. We want to select the index with a prefix % that is as close to the FieldRanges as possible PrefixDifference = length(FieldRanges) - length(Prefix), diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl index be2616ff5..584b2dffb 100644 --- a/src/mango/src/mango_selector.erl +++ b/src/mango/src/mango_selector.erl @@ -61,7 +61,7 @@ match_int(Selector, {Props}) -> match(Selector, {Props}, fun mango_json:cmp/2). % Convert each operator into a normalized version as well -% as convert an implict operators into their explicit +% as convert an implicit operators into their explicit % versions. norm_ops({[{<<"$and">>, Args}]}) when is_list(Args) -> {[{<<"$and">>, [norm_ops(A) || A <- Args]}]}; @@ -197,7 +197,7 @@ norm_ops(Value) -> % % Its important to note that we can only normalize % field names like this through boolean operators where -% we can gaurantee commutativity. We can't necessarily +% we can guarantee commutativity. We can't necessarily % do the same through the '$elemMatch' or '$allMatch' % operators but we can apply the same algorithm to its % arguments. 
diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl index aaa1e3329..ab7a63f01 100644 --- a/src/mango/src/mango_selector_text.erl +++ b/src/mango/src/mango_selector_text.erl @@ -303,7 +303,7 @@ get_range(max, _Arg) -> field_exists_query(Path) -> % We specify two here for :* and .* so that we don't incorrectly % match a path foo.name against foo.name_first (if were to just - % appened * isntead). + % append * instead). Parts = [ % We need to remove the period from the path list to indicate that it is % a path separator. We escape the colon because it is not used as a diff --git a/test/elixir/README.md b/test/elixir/README.md index 51f83ef36..efd0e2d92 100644 --- a/test/elixir/README.md +++ b/test/elixir/README.md @@ -116,7 +116,7 @@ X means done, - means partially Elixir has a number of benefits which makes writing unit tests easier. For example it is trivial to do codegeneration of tests. -Bellow we present a few use cases where code-generation is really helpful. +Below we present a few use cases where code-generation is really helpful. 
## How to write ExUnit tests diff --git a/test/elixir/test/bulk_docs_test.exs b/test/elixir/test/bulk_docs_test.exs index 1a7c11045..d81b335ec 100644 --- a/test/elixir/test/bulk_docs_test.exs +++ b/test/elixir/test/bulk_docs_test.exs @@ -108,13 +108,13 @@ defmodule BulkDocsTest do end @tag :with_db - test "bulk docs raises error for invlaid `docs` parameter", ctx do + test "bulk docs raises error for invalid `docs` parameter", ctx do resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", body: %{docs: "foo"}) assert_bad_request(resp, "`docs` parameter must be an array.") end @tag :with_db - test "bulk docs raises error for invlaid `new_edits` parameter", ctx do + test "bulk docs raises error for invalid `new_edits` parameter", ctx do opts = [body: %{docs: [], new_edits: 0}] resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", opts) assert_bad_request(resp, "`new_edits` parameter must be a boolean.") diff --git a/test/elixir/test/changes_test.exs b/test/elixir/test/changes_test.exs index e3e8ba784..8547af59f 100644 --- a/test/elixir/test/changes_test.exs +++ b/test/elixir/test/changes_test.exs @@ -86,7 +86,7 @@ defmodule ChangesTest do end @tag :with_db - test "non-existing desing doc for filtered changes", context do + test "non-existing design doc for filtered changes", context do db_name = context[:db_name] resp = Couch.get("/#{db_name}/_changes?filter=nothingtosee/bop") assert resp.status_code == 404 @@ -101,7 +101,7 @@ defmodule ChangesTest do end @tag :with_db - test "non-existing desing doc and funcion for filtered changes", context do + test "non-existing design doc and function for filtered changes", context do db_name = context[:db_name] resp = Couch.get("/#{db_name}/_changes?filter=nothingtosee/movealong") assert resp.status_code == 404 diff --git a/test/elixir/test/config/suite.elixir b/test/elixir/test/config/suite.elixir index e071da87f..265e600db 100644 --- a/test/elixir/test/config/suite.elixir +++ b/test/elixir/test/config/suite.elixir @@ -90,8 +90,8 @@ 
"bulk docs emits conflict error for duplicate doc `_id`s", "bulk docs raises conflict error for combined update & delete", "bulk docs raises error for `all_or_nothing` option", - "bulk docs raises error for invlaid `docs` parameter", - "bulk docs raises error for invlaid `new_edits` parameter", + "bulk docs raises error for invalid `docs` parameter", + "bulk docs raises error for invalid `new_edits` parameter", "bulk docs raises error for missing `docs` parameter", "bulk docs supplies `id` if not provided in doc" ], @@ -120,8 +120,8 @@ "erlang function filtered changes", "function filtered changes", "map function filtered changes", - "non-existing desing doc and funcion for filtered changes", - "non-existing desing doc for filtered changes", + "non-existing design doc and function for filtered changes", + "non-existing design doc for filtered changes", "non-existing function for filtered changes" ], "CoffeeTest": [ diff --git a/test/elixir/test/cookie_auth_test.exs b/test/elixir/test/cookie_auth_test.exs index d7971868a..6e42963f0 100644 --- a/test/elixir/test/cookie_auth_test.exs +++ b/test/elixir/test/cookie_auth_test.exs @@ -234,7 +234,7 @@ defmodule CookieAuthTest do {:password, "eh, Boo-Boo?"} ]) - # make sure we cant create duplicate users + # make sure we can't create duplicate users create_doc_expect_error(@users_db, duplicate_jchris_user_doc, 409, "conflict") # we can't create _names |