; Upgrading CouchDB will overwrite this file.
[vendor]
name = {{package_author_name}}

[couchdb]
uuid = {{uuid}}
database_dir = {{data_dir}}
view_index_dir = {{view_index_dir}}
; util_driver_dir =
; plugin_dir =
os_process_timeout = 5000 ; 5 seconds; applies to view and external servers.
max_dbs_open = 500
delayed_commits = false
; Method used to compress everything that is appended to database and view index files, except
; for attachments (see the attachments section). Available methods are:
;
; none         - no compression
; snappy       - use google snappy, a very fast compressor/decompressor
; deflate_[N]  - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
;                lowest compression ratio) to 9 (slowest, highest compression ratio)
file_compression = snappy
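; For example, to trade some write speed for smaller files, a mid-level
; deflate setting could be used instead of the default (shown commented out):
;file_compression = deflate_6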
; Higher values may give better read performance due to fewer read operations
; and/or more OS page cache hits. They can, however, increase overall response
; time for writes when there are many attachment write requests in parallel.
attachment_stream_buffer_size = 4096
; Default security object for databases if not explicitly set
; everyone - same as couchdb 1.0, everyone can read/write
; admin_only - only admins can read/write
; admin_local - sharded dbs on :5984 are read/write for everyone,
;               local dbs on :5986 are read/write for admins only
default_security = admin_local
; btree_chunk_size = 1279
; maintenance_mode = false
; stem_interactive_updates = true
; update_lru_on_read = true
; uri_file =
; The speed of processing the _changes feed with doc_ids filter can be
; influenced directly with this setting - increase for faster processing at the
; expense of more memory usage.
changes_doc_ids_optimization_threshold = 100
; Maximum document ID length. Can be set to an integer or 'infinity'.
;max_document_id_length = infinity
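; For example, to reject unusually long document IDs (example value only):
;max_document_id_length = 512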
;
; Limit maximum document size. Requests to create / update documents with a body
; size larger than this will fail with a 413 http error. This limit applies to
; requests which update a single document as well as individual documents from
; a _bulk_docs request. Since there is no canonical size of JSON-encoded data,
; due to variability in what is escaped or how floats are encoded, this limit
; is applied conservatively. For example, 1.0e+16 could be encoded as 1e16, so
; 4 bytes are used for the size calculation instead of 7.
;max_document_size = 4294967296 ; bytes
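; For example, to cap document bodies at roughly 8 MB (example value only):
;max_document_size = 8000000 ; bytes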

; Maximum attachment size.
; max_attachment_size = infinity

[cluster]
; Number of shards each new database is split into (default q)
q=8
; Number of replica copies kept of each shard (default n)
n=3
; placement = metro-dc-a:2,metro-dc-b:1
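; In the placement example above, two replicas of each shard would be kept in
; zone metro-dc-a and one in metro-dc-b, matching n=3 replicas in total.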

[chttpd]
; These settings affect the main, clustered port (5984 by default).
port = {{cluster_port}}
bind_address = 127.0.0.1
backlog = 512
docroot = {{fauxton_root}}
socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
require_valid_user = false
; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
; If the Server header is left out, Mochiweb will add its own.
prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
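; For example, a client can request the minimal header set with (example
; request only):
;   curl -i -H "Prefer: return=minimal" http://127.0.0.1:5984/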
;
; Limit the maximum number of databases for which detailed information can be
; requested via _dbs_info in a single request
max_db_number_for_dbs_info_req = 100

[database_compaction]
; Larger buffer sizes can result in smaller compacted files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes written

[view_compaction]
; Larger buffer sizes can result in smaller compacted files
keyvalue_buffer_size = 2097152 ; value in bytes

[couch_peruser]
; If enabled, couch_peruser ensures that a private per-user database
; exists for each document in _users. These databases are writable only
; by the corresponding user. Databases are in the following form:
; userdb-{hex encoded username}
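; For example, a user named "bob" would get the database userdb-626f62,
; since "bob" hex-encodes to 626f62.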
enable = false
; If set to true and a user is deleted, the respective database gets
; deleted as well.
delete_dbs = false
; Set a default q value for peruser-created databases that is different from
; the [cluster] q value
;q = 1
; Prefix for user databases. If you change this after user databases have been
; created, the existing databases will not be deleted when the associated user
; is deleted, because the prefix no longer matches.
database_prefix = userdb-

[httpd]
port = {{backend_port}}
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
vhost_global_handlers = _utils, _uuids, _session, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
enable_cors = false
enable_xframe_options = false
; CouchDB can optionally enforce a maximum URI length:
; max_uri_length = 8000
; changes_timeout = 60000
; config_whitelist = 
; rewrite_limit = 100
; x_forwarded_host = X-Forwarded-Host
; x_forwarded_proto = X-Forwarded-Proto
; x_forwarded_ssl = X-Forwarded-Ssl
; Maximum allowed http request size. Applies to both clustered and local port.
max_http_request_size = 67108864 ; 64 MB

; [httpd_design_handlers]
; _view = 

; [ioq]
; concurrency = 10
; ratio = 0.01

[ssl]
port = 6984

; [chttpd_auth]
; authentication_db = _users

; [chttpd_auth_cache]
; max_lifetime = 600000
; max_objects = 
; max_size = 104857600

; [mem3]
; nodes_db = _nodes
; shard_cache_size = 25000
; shards_db = _dbs
; sync_concurrency = 10

; [fabric]
; all_docs_concurrency = 10
; changes_duration = 
; shard_timeout_factor = 2
; uuid_prefix_len = 7

; [rexi]
; buffer_count = 2000
; server_per_node = false

; [global_changes]
; max_event_delay = 25
; max_write_delay = 500
; update_db = true

; [view_updater]
; min_writer_items = 100
; min_writer_size = 16777216

[couch_httpd_auth]
; WARNING! This only affects the node-local port (5986 by default).
; You probably want the settings under [chttpd].
authentication_db = _users
authentication_redirect = /_utils/session.html
require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies
iterations = 10 ; iterations for password hashing
; min_iterations = 1
; max_iterations = 1000000000
; password_scheme = pbkdf2
; proxy_use_secret = false
; comma-separated list of public fields, 404 if empty
; public_fields =
; secret = 
; users_db_public = false
; cookie_domain = example.com

; CSP (Content Security Policy) Support for _utils
[csp]
enable = true
; header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';

[cors]
credentials = false
; List of origins separated by a comma, * means accept all
; Origins must include the scheme: http://example.com
; You can't set origins: * and credentials = true at the same time.
;origins = *
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =
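; For example, to allow a local dev app and a production origin (all values
; below are examples only):
;origins = http://localhost:3000, https://app.example.com
;headers = accept, authorization, content-type, origin
;methods = GET, PUT, POST, HEAD, DELETE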

; Configuration for a vhost
;[cors:http://example.com]
; credentials = false
; List of origins separated by a comma
; Origins must include the scheme: http://example.com
; You can't set origins: * and credentials = true at the same time.
;origins =
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =

; Configuration for the design document cache
;[ddoc_cache]
; The maximum size of the cache in bytes
;max_size = 104857600 ; 100MiB
; The period each cache entry should wait before
; automatically refreshing in milliseconds
;refresh_timeout = 67000

[x_frame_options]
; Setting same_origin to true will return X-Frame-Options: SAMEORIGIN.
; If same_origin is set, the hosts setting is ignored.
; same_origin = true
; Setting hosts will return X-Frame-Options: ALLOW-FROM https://example.com/
; List of hosts separated by a comma. * means accept all
; hosts =
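; For example, to allow framing only from one trusted site (example host only):
; hosts = https://example.com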

[query_servers]
javascript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main.js
coffeescript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js

; enable mango query engine
[native_query_servers]
query = {mango_native_proc, start_link, []}

; Setting reduce_limit to false disables the check that reduce function
; output must shrink relative to its input.
; If you think you're hitting reduce_limit with a "good" reduce function,
; please let us know on the mailing list so we can fine tune the heuristic.
[query_server_config]
; commit_freq = 5
reduce_limit = true
os_process_limit = 100
; os_process_idle_limit = 300
; os_process_soft_limit = 100
; Timeout for how long a response from a busy view group server can take.
; "infinity" is also a valid configuration value.
;group_info_timeout = 5000

[daemons]
index_server={couch_index_server, start_link, []}
external_manager={couch_external_manager, start_link, []}
query_servers={couch_proc_manager, start_link, []}
vhosts={couch_httpd_vhost, start_link, []}
httpd={couch_httpd, start_link, []}
uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
os_daemons={couch_os_daemons, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[mango]
; Set to true to disable the "index all fields" text index, which can lead
; to out of memory issues when users have documents with nested array fields.
;index_all_disabled = false
; Default limit value for mango _find queries.
;default_limit = 25

[indexers]
couch_mrview = true

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "{{prefix}}/share/www"}

_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "{{prefix}}/share/www"}
_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_config = {couch_httpd_misc_handlers, handle_config_req}
_replicate = {couch_replicator_httpd, handle_req}
_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
_restart = {couch_httpd_misc_handlers, handle_restart_req}
_stats = {couch_stats_httpd, handle_stats_req}
_session = {couch_httpd_auth, handle_session_req}
_plugins = {couch_plugins_httpd, handle_req}
_system = {chttpd_misc, handle_system_req}

[httpd_db_handlers]
_all_docs = {couch_mrview_http, handle_all_docs_req}
_local_docs = {couch_mrview_http, handle_local_docs_req}
_design_docs = {couch_mrview_http, handle_design_docs_req}
_changes = {couch_httpd_db, handle_db_changes_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_temp_view = {couch_mrview_http, handle_temp_view_req}
_view_cleanup = {couch_mrview_http, handle_cleanup_req}

; The external module takes an optional argument allowing you to narrow it to a
; single script. Otherwise the script name is inferred from the first path section
; after _external's own path.
; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
; _external = {couch_httpd_external, handle_external_req}

[httpd_design_handlers]
_compact = {couch_mrview_http, handle_compact_req}
_info = {couch_mrview_http, handle_info_req}
_list = {couch_mrview_show, handle_view_list_req}
_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
_show = {couch_mrview_show, handle_doc_show_req}
_update = {couch_mrview_show, handle_doc_update_req}
_view = {couch_mrview_http, handle_view_req}
_view_changes = {couch_mrview_http, handle_view_changes_req}

; Enable external as an httpd handler, then link it with commands here.
; Note: this API is still under consideration.
; [external]
; mykey = /path/to/mycommand

; Here you can set up commands for CouchDB to manage
; while it is alive. It will attempt to restart each command
; if it exits.
; [os_daemons]
; some_daemon_name = /path/to/script -with args
; [os_daemon_settings]
; max_retries = 3
; retry_time = 5


[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;     All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;     First 26 hex characters are random. Last 6 increment in
;     random amounts until an overflow occurs. On overflow, the
;     random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;     First 14 characters are the time in hex. Last 18 are random.
;   utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
;     First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
algorithm = sequential
; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
utc_id_suffix =
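; For example, one of several replicating instances might use (example value
; only):
;utc_id_suffix = node-a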
; Maximum number of UUIDs retrievable from /_uuids in a single request
max_count = 1000

[attachments]
compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
compressible_types = text/*, application/javascript, application/json, application/xml

[replicator]
; Random jitter applied on replication job startup (milliseconds)
startup_jitter = 5000
; Number of actively running replications
max_jobs = 500
; Scheduling interval in milliseconds. During each rescheduling cycle the
; scheduler may start or stop up to max_churn replication jobs.
interval = 60000
; Maximum number of replications to start and stop during rescheduling.
max_churn = 20
; More worker processes can give higher network throughput but can also
; imply more disk and network IO.
worker_processes = 4
; With lower batch sizes, checkpoints are done more frequently. Lower batch
; sizes also reduce total RAM usage.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
connection_timeout = 30000
; Request timeout
;request_timeout = infinity
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 5
; Use checkpoints
;use_checkpoints = true
; Checkpoint interval
;checkpoint_interval = 30000
; Some socket options that might boost performance in some scenarios:
;       {nodelay, boolean()}
;       {sndbuf, integer()}
;       {recbuf, integer()}
;       {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; Path to a file containing the user's certificate.
;cert_file = /full/path/to/server_cert.pem
; Path to file containing user's private PEM encoded key.
;key_file = /full/path/to/server_key.pem
; String containing the user's password. Only used if the private keyfile is password protected.
;password = somepassword
; Set to true to validate peer certificates.
verify_ssl_certificates = false
; File containing a list of peer trusted certificates (in the PEM format).
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
ssl_certificate_max_depth = 3
; Maximum document ID length for replication. 0 means no limit is enforced.
;max_document_id_length = 0
; How much time to wait before retrying after a missing doc exception. This
; exception happens if the document was seen in the changes feed, but internal
; replication hasn't caught up yet, and fetching the document's revisions
; fails. This is a common scenario when the source is updated while continuous
; replication is running. The retry period would depend on how quickly internal
; replication is expected to catch up. In general this is an optimisation to
; avoid crashing the whole replication job, which would consume more resources
; and add log noise.
;missing_doc_retry_msec = 2000
; Wait this many seconds after startup before attaching changes listeners
; cluster_start_period = 5
; Re-check cluster state at least every cluster_quiet_period seconds
; cluster_quiet_period = 60

[compaction_daemon]
; The delay, in seconds, between each check to determine which databases and
; view indexes need to be compacted.
check_interval = 300
; If a database or view index file is smaller than this value (in bytes),
; compaction will not happen. Very small files always have very high
; fragmentation, so compacting them is not worthwhile.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.
; The daemon compacts databases and their respective view groups when all the
; condition parameters are satisfied. Configuration can be per database or
; global, and it has the following format:
;
; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
;
; Possible parameters:
;
; * db_fragmentation - If the ratio (as an integer percentage) of the amount
;                      of old data (and its supporting metadata) over the database
;                      file size is equal to or greater than this value, this
;                      database compaction condition is satisfied.
;                      This value is computed as:
;
;                           (file_size - data_size) / file_size * 100
;
;                      The data_size and file_size values can be obtained when
;                      querying a database's information URI (GET /dbname/).
;
; * view_fragmentation - If the ratio (as an integer percentage) of the amount
;                        of old data (and its supporting metadata) over the view
;                        index (view group) file size is equal to or greater than
;                        this value, then this view index compaction condition is
;                        satisfied. This value is computed as:
;
;                            (file_size - data_size) / file_size * 100
;
;                        The data_size and file_size values can be obtained when
;                        querying a view group's information URI
;                        (GET /dbname/_design/groupname/_info).
;
; * from _and_ to - The period for which a database (and its view groups) compaction
;                   is allowed. The value for these parameters must obey the format:
;
;                   HH:MM - HH:MM  (HH in [0..23], MM in [0..59])
;
; * strict_window - If a compaction is still running after the end of the allowed
;                   period, it will be canceled if this parameter is set to 'true'.
;                   It defaults to 'false' and it's meaningful only if the *period*
;                   parameter is also specified.
;
; * parallel_view_compaction - If set to 'true', the database and its views are
;                              compacted in parallel. This is only useful on
;                              certain setups, like for example when the database
;                              and view index directories point to different
;                              disks. It defaults to 'false'.
;
; Before a compaction is triggered, an estimation of how much free disk space is
; needed is computed. This estimation corresponds to 2 times the data size of
; the database or view index. When there's not enough free disk space to compact
; a particular database or view index, a warning message is logged.
;
; Examples:
;
; 1) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
;    The `foo` database is compacted if its fragmentation is 70% or more.
;    Any view index of this database is compacted only if its fragmentation
;    is 60% or more.
;
; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
;    Similar to the preceding example but a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM.
;
; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
;    Similar to the preceding example - a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM. If at
;    4 AM the database or one of its views is still compacting, the compaction
;    process will be canceled.
;
; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
;    Similar to the preceding example, but a database and its views can be
;    compacted in parallel.
;
_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
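; A hypothetical per-database override (the name `mydb` is an example only)
; that compacts more aggressively, but only at night:
;mydb = [{db_fragmentation, "60%"}, {view_fragmentation, "50%"}, {from, "01:00"}, {to, "05:00"}]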

[log]
; Possible log levels:
;  debug
;  info
;  notice
;  warning, warn
;  error, err
;  critical, crit
;  alert
;  emergency, emerg
;  none
;
level = info
;
; Set the maximum log message length in bytes that will be
; passed through the writer
;
; max_message_size = 16000
;
;
; There are three different log writers that can be configured
; to write log messages. The default writes to the stderr of the
; Erlang VM, which is useful for debugging/development as well
; as for many container deployments.
;
; There's also a file writer that works with logrotate and an
; rsyslog writer for deployments that need to have logs sent
; over the network.
;
writer = stderr
;
; File Writer Options:
;
; The file writer will check every 30s to see if it needs
; to reopen its file. This is useful for people who configure
; logrotate to move log files periodically.
;
; file = ./couch.log ; Path name to write logs to
;
; Write operations will happen either every write_buffer bytes
; or write_delay milliseconds. These are passed directly to the
; Erlang file module with the write_delay option documented here:
;
;     http://erlang.org/doc/man/file.html
;
; write_buffer = 0
; write_delay = 0
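; For example, a file-writer setup might look like this (path and values are
; examples only):
;writer = file
;file = /var/log/couchdb/couch.log
;write_buffer = 16384
;write_delay = 1000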
;
;
; Syslog Writer Options:
;
; The syslog writer options all correspond to their obvious
; counterparts in rsyslog nomenclature.
;
; syslog_host =
; syslog_port = 514
; syslog_appid = couchdb
; syslog_facility = local2
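; For example, to send logs to a local rsyslog daemon (values are examples):
;writer = syslog
;syslog_host = 127.0.0.1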

[stats]
; Stats collection interval in seconds. Default 10 seconds.
;interval = 10